From: "Kirill A. Shutemov" <kirill@shutemov.name>
To: Dave Hansen <dave.hansen@linux.intel.com>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>
Cc: David Rientjes <rientjes@google.com>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Kees Cook <keescook@chromium.org>, Will Drewry <wad@chromium.org>,
	"Edgecombe, Rick P" <rick.p.edgecombe@intel.com>,
	"Kleen, Andi" <andi.kleen@intel.com>,
	Liran Alon <liran.alon@oracle.com>,
	Mike Rapoport  <rppt@kernel.org>,
	x86@kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [RFCv2 13/16] KVM: Rework copy_to/from_guest() to avoid direct mapping
Date: Tue, 20 Oct 2020 09:18:56 +0300
Message-ID: <20201020061859.18385-14-kirill.shutemov@linux.intel.com>
In-Reply-To: <20201020061859.18385-1-kirill.shutemov@linux.intel.com>

We are going to unmap guest pages from the direct mapping and cannot
rely on it for guest memory access. Use a temporary kmap_atomic()-style
mapping to access guest memory instead.
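
For context, this is roughly how the access pattern changes in
copy_from_guest() (illustrative sketch only, mirroring the hunks below;
kvm_map_page_atomic()/kvm_unmap_page_atomic() are the per-CPU helpers
added in virt/lib/mem_protected.c):

	/* Old: relies on the page being present in the direct mapping. */
	memcpy(data, page_address(page) + offset, seg);

	/*
	 * New: temporarily map the page into this CPU's slot of a
	 * dedicated vmalloc area. kvm_map_page_atomic() disables
	 * preemption and installs the PTE; kvm_unmap_page_atomic()
	 * clears it, flushes the local TLB entry and re-enables
	 * preemption.
	 */
	vaddr = kvm_map_page_atomic(page);
	memcpy(data, vaddr + offset, seg);
	kvm_unmap_page_atomic(vaddr);

copy_to_guest() follows the same pattern for the write direction.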

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 virt/kvm/kvm_main.c      |  27 ++++++++++-
 virt/lib/mem_protected.c | 101 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 126 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4c008c7b4974..9b569b78874a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,7 @@
 #include <linux/io.h>
 #include <linux/lockdep.h>
 #include <linux/kthread.h>
+#include <linux/pagewalk.h>
 
 #include <asm/processor.h>
 #include <asm/ioctl.h>
@@ -154,6 +155,12 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+void *kvm_map_page_atomic(struct page *page);
+void kvm_unmap_page_atomic(void *vaddr);
+
+int kvm_init_protected_memory(void);
+void kvm_exit_protected_memory(void);
+
 int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect);
 
 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -2329,6 +2336,7 @@ int copy_from_guest(void *data, unsigned long hva, int len, bool protected)
 	int offset = offset_in_page(hva);
 	struct page *page;
 	int npages, seg;
+	void *vaddr;
 
 	if (!protected)
 		return __copy_from_user(data, (void __user *)hva, len);
@@ -2341,7 +2349,11 @@ int copy_from_guest(void *data, unsigned long hva, int len, bool protected)
 		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
-		memcpy(data, page_address(page) + offset, seg);
+
+		vaddr = kvm_map_page_atomic(page);
+		memcpy(data, vaddr + offset, seg);
+		kvm_unmap_page_atomic(vaddr);
+
 		put_page(page);
 		len -= seg;
 		hva += seg;
@@ -2356,6 +2368,7 @@ int copy_to_guest(unsigned long hva, const void *data, int len, bool protected)
 	int offset = offset_in_page(hva);
 	struct page *page;
 	int npages, seg;
+	void *vaddr;
 
 	if (!protected)
 		return __copy_to_user((void __user *)hva, data, len);
@@ -2369,7 +2382,11 @@ int copy_to_guest(unsigned long hva, const void *data, int len, bool protected)
 						 FOLL_WRITE | FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
-		memcpy(page_address(page) + offset, data, seg);
+
+		vaddr = kvm_map_page_atomic(page);
+		memcpy(vaddr + offset, data, seg);
+		kvm_unmap_page_atomic(vaddr);
+
 		put_page(page);
 		len -= seg;
 		hva += seg;
@@ -4945,6 +4962,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	if (r)
 		goto out_free;
 
+	if (IS_ENABLED(CONFIG_HAVE_KVM_PROTECTED_MEMORY) &&
+	    kvm_init_protected_memory())
+		goto out_unreg;
+
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
@@ -4968,6 +4989,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	return 0;
 
 out_unreg:
+	kvm_exit_protected_memory();
 	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
@@ -4989,6 +5011,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	kvm_exit_protected_memory();
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
diff --git a/virt/lib/mem_protected.c b/virt/lib/mem_protected.c
index 0b01dd74f29c..1dfe82534242 100644
--- a/virt/lib/mem_protected.c
+++ b/virt/lib/mem_protected.c
@@ -5,6 +5,100 @@
 #include <linux/vmalloc.h>
 #include <asm/tlbflush.h>
 
+static pte_t **guest_map_ptes;
+static struct vm_struct *guest_map_area;
+
+void *kvm_map_page_atomic(struct page *page)
+{
+	pte_t *pte;
+	void *vaddr;
+
+	preempt_disable();
+	pte = guest_map_ptes[smp_processor_id()];
+	vaddr = guest_map_area->addr + smp_processor_id() * PAGE_SIZE;
+	set_pte(pte, mk_pte(page, PAGE_KERNEL));
+	return vaddr;
+}
+EXPORT_SYMBOL_GPL(kvm_map_page_atomic);
+
+void kvm_unmap_page_atomic(void *vaddr)
+{
+	pte_t *pte = guest_map_ptes[smp_processor_id()];
+	set_pte(pte, __pte(0));
+	flush_tlb_one_kernel((unsigned long)vaddr);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_page_atomic);
+
+int kvm_init_protected_memory(void)
+{
+	guest_map_ptes = kmalloc_array(num_possible_cpus(),
+				       sizeof(pte_t *), GFP_KERNEL);
+	if (!guest_map_ptes)
+		return -ENOMEM;
+
+	guest_map_area = alloc_vm_area(PAGE_SIZE * num_possible_cpus(),
+				       guest_map_ptes);
+	if (!guest_map_area) {
+		kfree(guest_map_ptes);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_init_protected_memory);
+
+void kvm_exit_protected_memory(void)
+{
+	if (guest_map_area)
+		free_vm_area(guest_map_area);
+	if (guest_map_ptes)
+		kfree(guest_map_ptes);
+}
+EXPORT_SYMBOL_GPL(kvm_exit_protected_memory);
+
+static int adjust_direct_mapping_pte_range(pmd_t *pmd, unsigned long addr,
+					   unsigned long end,
+					   struct mm_walk *walk)
+{
+	bool protect = (bool)walk->private;
+	pte_t *pte;
+	struct page *page;
+
+	if (pmd_trans_huge(*pmd)) {
+		page = pmd_page(*pmd);
+		if (is_huge_zero_page(page))
+			return 0;
+		VM_BUG_ON_PAGE(total_mapcount(page) != 1, page);
+		/* XXX: Would it fail with direct device assignment? */
+		VM_BUG_ON_PAGE(page_count(page) != 1, page);
+		kernel_map_pages(page, HPAGE_PMD_NR, !protect);
+		return 0;
+	}
+
+	pte = pte_offset_map(pmd, addr);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		pte_t entry = *pte;
+
+		if (!pte_present(entry))
+			continue;
+
+		if (is_zero_pfn(pte_pfn(entry)))
+			continue;
+
+		page = pte_page(entry);
+
+		VM_BUG_ON_PAGE(page_mapcount(page) != 1, page);
+		kernel_map_pages(page, 1, !protect);
+	}
+
+	return 0;
+}
+
+static const struct mm_walk_ops adjust_direct_mapping_ops = {
+	.pmd_entry      = adjust_direct_mapping_pte_range,
+};
+
 int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect)
 {
 	struct mm_struct *mm = current->mm;
@@ -50,6 +144,13 @@ int __kvm_protect_memory(unsigned long start, unsigned long end, bool protect)
 		if (ret)
 			goto out;
 
+		if (vma_is_anonymous(vma)) {
+			ret = walk_page_range_novma(mm, start, tmp,
+						    &adjust_direct_mapping_ops, NULL,
+						    (void *) protect);
+			if (ret)
+				goto out;
+		}
 next:
 		start = tmp;
 		if (start < prev->vm_end)
-- 
2.26.2


