From: Alexandre Chartre <alexandre.chartre@oracle.com>
To: pbonzini@redhat.com, rkrcmar@redhat.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, hpa@zytor.com,
	dave.hansen@linux.intel.com, luto@kernel.org,
	peterz@infradead.org, kvm@vger.kernel.org, x86@kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: konrad.wilk@oracle.com, jan.setjeeilers@oracle.com,
	liran.alon@oracle.com, jwadams@google.com,
	alexandre.chartre@oracle.com
Subject: [RFC KVM 14/27] kvm/isolation: functions to copy page table entries for a VA range
Date: Mon, 13 May 2019 16:38:22 +0200
Message-ID: <1557758315-12667-15-git-send-email-alexandre.chartre@oracle.com>
In-Reply-To: <1557758315-12667-1-git-send-email-alexandre.chartre@oracle.com>

These functions are based on the copy_pxx_range() functions defined in
mm/memory.c. The main difference is that a level parameter specifies the
page table level (PGD, P4D, PUD, PMD, PTE) at which the copy should be
done: the source entry at that level is copied as-is, so everything below
it is shared between the source and destination page tables. Also, these
functions don't take a vma parameter, and they don't alter the source
page table even if an entry is bad.
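
At each level the pattern is roughly the following (condensed sketch of
the PMD level from the patch below; the huge-page and not-present cases
are omitted):

	if (level == PGT_LEVEL_PMD || pmd_none(*src_pmd))
		/* copy the entry itself; lower levels are then shared */
		err = kvm_set_pmd(dst_pmd, *src_pmd);
	else
		/* allocate a PTE page and copy entries one level down */
		err = kvm_copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
					 addr, next);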

Also, kvm_copy_pte_range() can be called with a buffer that is not
page-aligned, so the buffer address is aligned down to the start of its
page; that way the entire buffer gets mapped even when the end of the
buffer crosses a page boundary.
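
For example, with 4KB pages, an illustrative 0x100-byte buffer whose end
crosses a page boundary (addresses made up for illustration):

	addr = 0xffff888000000f80, size = 0x100, end = 0xffff888000001080
	addr &= PAGE_MASK;	/* addr = 0xffff888000000000 */
	1st iteration: copy PTE for page 0xffff888000000000 (addr < end)
	2nd iteration: copy PTE for page 0xffff888000001000 (addr < end)

so both pages backing the buffer end up mapped.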

These functions will be used to populate the KVM page table.
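
For illustration, a caller could map a kernel buffer with something like
this (hypothetical caller and variable names, not part of this patch):

	/* map the buffer pointed to by buf into the KVM page table */
	err = kvm_copy_ptes(buf, sizeof(*buf));
	if (err)
		return err;	/* KVM page table may be partially populated */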

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c |  229 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/isolation.h |    1 +
 2 files changed, 230 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index b681e4f..4f1b511 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -450,6 +450,235 @@ static int kvm_set_pgd(pgd_t *pgd, pgd_t pgd_value)
 }
 
 
+static int kvm_copy_pte_range(struct mm_struct *dst_mm,
+			      struct mm_struct *src_mm, pmd_t *dst_pmd,
+			      pmd_t *src_pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *src_pte, *dst_pte;
+
+	dst_pte = kvm_pte_alloc(dst_mm, dst_pmd, addr);
+	if (IS_ERR(dst_pte))
+		return PTR_ERR(dst_pte);
+
+	addr &= PAGE_MASK;
+	src_pte = pte_offset_map(src_pmd, addr);
+
+	do {
+		pr_debug("PTE: %lx/%lx set[%lx] = %lx\n",
+		    addr, addr + PAGE_SIZE, (long)dst_pte, pte_val(*src_pte));
+		set_pte_at(dst_mm, addr, dst_pte, *src_pte);
+
+	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr < end);
+
+	return 0;
+}
+
+static int kvm_copy_pmd_range(struct mm_struct *dst_mm,
+			      struct mm_struct *src_mm,
+			      pud_t *dst_pud, pud_t *src_pud,
+			      unsigned long addr, unsigned long end,
+			      enum page_table_level level)
+{
+	pmd_t *src_pmd, *dst_pmd;
+	unsigned long next;
+	int err;
+
+	dst_pmd = kvm_pmd_alloc(dst_mm, dst_pud, addr);
+	if (IS_ERR(dst_pmd))
+		return PTR_ERR(dst_pmd);
+
+	src_pmd = pmd_offset(src_pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		if (level == PGT_LEVEL_PMD || pmd_none(*src_pmd)) {
+			pr_debug("PMD: %lx/%lx set[%lx] = %lx\n",
+			    addr, next, (long)dst_pmd, pmd_val(*src_pmd));
+			err = kvm_set_pmd(dst_pmd, *src_pmd);
+			if (err)
+				return err;
+			continue;
+		}
+
+		if (!pmd_present(*src_pmd)) {
+			pr_warn("PMD: not present for [%lx,%lx]\n",
+			    addr, next - 1);
+			pmd_clear(dst_pmd);
+			continue;
+		}
+
+		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
+			pr_debug("PMD: %lx/%lx set[%lx] = %lx (huge/devmap)\n",
+			    addr, next, (long)dst_pmd, pmd_val(*src_pmd));
+			err = kvm_set_pmd(dst_pmd, *src_pmd);
+			if (err)
+				return err;
+			continue;
+		}
+
+		err = kvm_copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
+					addr, next);
+		if (err) {
+			pr_err("PMD: ERR PTE addr=%lx next=%lx\n", addr, next);
+			return err;
+		}
+
+	} while (dst_pmd++, src_pmd++, addr = next, addr < end);
+
+	return 0;
+}
+
+static int kvm_copy_pud_range(struct mm_struct *dst_mm,
+			      struct mm_struct *src_mm,
+			      p4d_t *dst_p4d, p4d_t *src_p4d,
+			      unsigned long addr, unsigned long end,
+			      enum page_table_level level)
+{
+	pud_t *src_pud, *dst_pud;
+	unsigned long next;
+	int err;
+
+	dst_pud = kvm_pud_alloc(dst_mm, dst_p4d, addr);
+	if (IS_ERR(dst_pud))
+		return PTR_ERR(dst_pud);
+
+	src_pud = pud_offset(src_p4d, addr);
+
+	do {
+		next = pud_addr_end(addr, end);
+		if (level == PGT_LEVEL_PUD || pud_none(*src_pud)) {
+			pr_debug("PUD: %lx/%lx set[%lx] = %lx\n",
+			    addr, next, (long)dst_pud, pud_val(*src_pud));
+			err = kvm_set_pud(dst_pud, *src_pud);
+			if (err)
+				return err;
+			continue;
+		}
+
+		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+			pr_debug("PUD: %lx/%lx set[%lx] = %lx (huge/devmap)\n",
+			    addr, next, (long)dst_pud, pud_val(*src_pud));
+			err = kvm_set_pud(dst_pud, *src_pud);
+			if (err)
+				return err;
+			continue;
+		}
+
+		err = kvm_copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
+					addr, next, level);
+		if (err) {
+			pr_err("PUD: ERR PMD addr=%lx next=%lx\n", addr, next);
+			return err;
+		}
+
+	} while (dst_pud++, src_pud++, addr = next, addr < end);
+
+	return 0;
+}
+
+static int kvm_copy_p4d_range(struct mm_struct *dst_mm,
+				struct mm_struct *src_mm,
+				pgd_t *dst_pgd, pgd_t *src_pgd,
+				unsigned long addr, unsigned long end,
+				enum page_table_level level)
+{
+	p4d_t *src_p4d, *dst_p4d;
+	unsigned long next;
+	int err;
+
+	dst_p4d = kvm_p4d_alloc(dst_mm, dst_pgd, addr);
+	if (IS_ERR(dst_p4d))
+		return PTR_ERR(dst_p4d);
+
+	src_p4d = p4d_offset(src_pgd, addr);
+
+	do {
+		next = p4d_addr_end(addr, end);
+		if (level == PGT_LEVEL_P4D || p4d_none(*src_p4d)) {
+			pr_debug("P4D: %lx/%lx set[%lx] = %lx\n",
+			    addr, next, (long)dst_p4d, p4d_val(*src_p4d));
+
+			err = kvm_set_p4d(dst_p4d, *src_p4d);
+			if (err)
+				return err;
+			continue;
+		}
+
+		err = kvm_copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
+					addr, next, level);
+		if (err) {
+			pr_err("P4D: ERR PUD addr=%lx next=%lx\n", addr, next);
+			return err;
+		}
+
+	} while (dst_p4d++, src_p4d++, addr = next, addr < end);
+
+	return 0;
+}
+
+static int kvm_copy_pgd_range(struct mm_struct *dst_mm,
+				struct mm_struct *src_mm, unsigned long addr,
+				unsigned long end, enum page_table_level level)
+{
+	pgd_t *src_pgd, *dst_pgd;
+	unsigned long next;
+	int err;
+
+	dst_pgd = pgd_offset(dst_mm, addr);
+	src_pgd = pgd_offset(src_mm, addr);
+
+	do {
+		next = pgd_addr_end(addr, end);
+		if (level == PGT_LEVEL_PGD || pgd_none(*src_pgd)) {
+			pr_debug("PGD: %lx/%lx set[%lx] = %lx\n",
+			    addr, next, (long)dst_pgd, pgd_val(*src_pgd));
+			err = kvm_set_pgd(dst_pgd, *src_pgd);
+			if (err)
+				return err;
+			continue;
+		}
+
+		err = kvm_copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
+					addr, next, level);
+		if (err) {
+			pr_err("PGD: ERR P4D addr=%lx next=%lx\n", addr, next);
+			return err;
+		}
+
+	} while (dst_pgd++, src_pgd++, addr = next, addr < end);
+
+	return 0;
+}
+
+/*
+ * Copy page table entries from the current page table (i.e. from the
+ * kernel page table) to the KVM page table. The level parameter specifies
+ * the page table level (PGD, P4D, PUD, PMD, PTE) at which the copy should
+ * be done.
+ */
+static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
+{
+	unsigned long addr = (unsigned long)ptr;
+	unsigned long end = addr + (unsigned long)size;
+
+	BUG_ON(current->mm == &kvm_mm);
+	pr_debug("KERNMAP COPY addr=%px size=%lx\n", ptr, size);
+	return kvm_copy_pgd_range(&kvm_mm, current->mm, addr, end, level);
+}
+
+
+/*
+ * Copy page table PTE entries from the current page table to the KVM
+ * page table.
+ */
+int kvm_copy_ptes(void *ptr, unsigned long size)
+{
+	return kvm_copy_mapping(ptr, size, PGT_LEVEL_PTE);
+}
+EXPORT_SYMBOL(kvm_copy_ptes);
+
+
 static int kvm_isolation_init_mm(void)
 {
 	pgd_t *kvm_pgd;
diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
index aa5e979..e8c018a 100644
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -16,5 +16,6 @@ static inline bool kvm_isolation(void)
 extern void kvm_isolation_enter(void);
 extern void kvm_isolation_exit(void);
 extern void kvm_may_access_sensitive_data(struct kvm_vcpu *vcpu);
+extern int kvm_copy_ptes(void *ptr, unsigned long size);
 
 #endif
-- 
1.7.1

