From: Alexandre Chartre <alexandre.chartre@oracle.com>
To: tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
hpa@zytor.com, dave.hansen@linux.intel.com, luto@kernel.org,
peterz@infradead.org, x86@kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Cc: pbonzini@redhat.com, konrad.wilk@oracle.com,
jan.setjeeilers@oracle.com, liran.alon@oracle.com,
junaids@google.com, graf@amazon.de, rppt@linux.vnet.ibm.com,
kuzuno@gmail.com, mgross@linux.intel.com,
alexandre.chartre@oracle.com
Subject: [RFC v4][PATCH part-2 06/13] mm/dpt: Functions to populate a decorated page-table from a VA range
Date: Mon, 4 May 2020 16:58:03 +0200 [thread overview]
Message-ID: <20200504145810.11882-7-alexandre.chartre@oracle.com> (raw)
In-Reply-To: <20200504145810.11882-1-alexandre.chartre@oracle.com>
Provide functions to copy page-table entries from the kernel page-table
to a decorated page-table for a specified VA range. These functions are
based on the copy_pxx_range() functions defined in mm/memory.c. A first
difference is that a level parameter can be specified to indicate the
page-table level (PGD, P4D, PUD, PMD, PTE) at which the copy should be
done. Also functions don't rely on mm or vma, and they don't alter the
source page-table even if an entry is bad. Finally, the VA range start
and size don't need to be page-aligned.
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
arch/x86/include/asm/dpt.h | 3 +
arch/x86/mm/dpt.c | 205 +++++++++++++++++++++++++++++++++++++
2 files changed, 208 insertions(+)
diff --git a/arch/x86/include/asm/dpt.h b/arch/x86/include/asm/dpt.h
index b9cba051ebf2..85d2c5051acb 100644
--- a/arch/x86/include/asm/dpt.h
+++ b/arch/x86/include/asm/dpt.h
@@ -40,5 +40,8 @@ struct dpt {
extern struct dpt *dpt_create(unsigned int pgt_alignment);
extern void dpt_destroy(struct dpt *dpt);
+extern int dpt_map_range(struct dpt *dpt, void *ptr, size_t size,
+ enum page_table_level level);
+extern int dpt_map(struct dpt *dpt, void *ptr, unsigned long size);
#endif
diff --git a/arch/x86/mm/dpt.c b/arch/x86/mm/dpt.c
index 7a1b4cd53b03..0e725344b921 100644
--- a/arch/x86/mm/dpt.c
+++ b/arch/x86/mm/dpt.c
@@ -384,6 +384,211 @@ static int dpt_set_pgd(struct dpt *dpt, pgd_t *pgd, pgd_t pgd_value)
return 0;
}
+/*
+ * Copy PTE entries covering [addr, end) from the kernel PTE page (under
+ * src_pmd) into the decorated page-table, allocating the destination PTE
+ * page if needed. The source page-table is not modified, and addr does
+ * not need to be page-aligned.
+ *
+ * Return 0 on success, or a negative error code if allocating the
+ * destination PTE page failed.
+ */
+static int dpt_copy_pte_range(struct dpt *dpt, pmd_t *dst_pmd, pmd_t *src_pmd,
+ unsigned long addr, unsigned long end)
+{
+ pte_t *orig_src_pte, *src_pte, *dst_pte;
+
+ dst_pte = dpt_pte_alloc(dpt, dst_pmd, addr);
+ if (IS_ERR(dst_pte))
+ return PTR_ERR(dst_pte);
+
+ addr &= PAGE_MASK;
+ src_pte = pte_offset_map(src_pmd, addr);
+ orig_src_pte = src_pte;
+
+ do {
+ /* NOTE(review): dpt_set_pte() result is ignored here while the
+ * other levels check dpt_set_pxx() errors — confirm it is void.
+ */
+ dpt_set_pte(dpt, dst_pte, *src_pte);
+
+ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr < end);
+
+ /*
+ * Balance pte_offset_map(): with CONFIG_HIGHPTE the PTE page is
+ * kmapped and must be unmapped once the copy is done.
+ */
+ pte_unmap(orig_src_pte);
+
+ return 0;
+}
+
+/*
+ * Copy PMD entries covering [addr, end) from the kernel page-table
+ * (under src_pud) into the decorated page-table (under dst_pud).
+ * Based on copy_pmd_range() in mm/memory.c, but takes a level
+ * parameter and never alters the source page-table.
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+static int dpt_copy_pmd_range(struct dpt *dpt, pud_t *dst_pud, pud_t *src_pud,
+ unsigned long addr, unsigned long end,
+ enum page_table_level level)
+{
+ pmd_t *src_pmd, *dst_pmd;
+ unsigned long next;
+ int err;
+
+ /* Allocate the destination PMD page if it is not there yet. */
+ dst_pmd = dpt_pmd_alloc(dpt, dst_pud, addr);
+ if (IS_ERR(dst_pmd))
+ return PTR_ERR(dst_pmd);
+
+ src_pmd = pmd_offset(src_pud, addr);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ /*
+ * Copy the whole PMD entry (no descent to PTE level) when the
+ * copy stops at PMD level, or when the source entry is empty
+ * or maps a huge/devmap page.
+ */
+ if (level == PGT_LEVEL_PMD || pmd_none(*src_pmd) ||
+ pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
+ err = dpt_set_pmd(dpt, dst_pmd, *src_pmd);
+ if (err)
+ return err;
+ /*
+ * In a do-while loop, continue jumps to the condition,
+ * so the pointer/addr increments below still run.
+ */
+ continue;
+ }
+
+ /*
+ * A bad source entry is only reported and the destination
+ * entry cleared: the source page-table is never fixed up.
+ */
+ if (!pmd_present(*src_pmd)) {
+ pr_warn("DPT %p: PMD not present for [%lx,%lx]\n",
+ dpt, addr, next - 1);
+ pmd_clear(dst_pmd);
+ continue;
+ }
+
+ err = dpt_copy_pte_range(dpt, dst_pmd, src_pmd, addr, next);
+ if (err) {
+ pr_err("DPT %p: PMD error copying PTE addr=%lx next=%lx\n",
+ dpt, addr, next);
+ return err;
+ }
+
+ } while (dst_pmd++, src_pmd++, addr = next, addr < end);
+
+ return 0;
+}
+
+/*
+ * Copy PUD entries covering [addr, end) from the kernel page-table
+ * (under src_p4d) into the decorated page-table (under dst_p4d),
+ * descending to dpt_copy_pmd_range() unless the copy stops at PUD
+ * level or the source entry can be copied as a whole.
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+static int dpt_copy_pud_range(struct dpt *dpt, p4d_t *dst_p4d, p4d_t *src_p4d,
+ unsigned long addr, unsigned long end,
+ enum page_table_level level)
+{
+ pud_t *src_pud, *dst_pud;
+ unsigned long next;
+ int err;
+
+ /* Allocate the destination PUD page if it is not there yet. */
+ dst_pud = dpt_pud_alloc(dpt, dst_p4d, addr);
+ if (IS_ERR(dst_pud))
+ return PTR_ERR(dst_pud);
+
+ src_pud = pud_offset(src_p4d, addr);
+
+ do {
+ next = pud_addr_end(addr, end);
+ /*
+ * Copy the whole PUD entry when the copy stops at PUD level,
+ * or when the source entry is empty or maps a huge/devmap
+ * page.
+ */
+ if (level == PGT_LEVEL_PUD || pud_none(*src_pud) ||
+ pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+ err = dpt_set_pud(dpt, dst_pud, *src_pud);
+ if (err)
+ return err;
+ /* continue still runs the do-while increments below. */
+ continue;
+ }
+
+ err = dpt_copy_pmd_range(dpt, dst_pud, src_pud, addr, next,
+ level);
+ if (err) {
+ pr_err("DPT %p: PUD error copying PMD addr=%lx next=%lx\n",
+ dpt, addr, next);
+ return err;
+ }
+
+ } while (dst_pud++, src_pud++, addr = next, addr < end);
+
+ return 0;
+}
+
+/*
+ * Copy P4D entries covering [addr, end) from the kernel page-table
+ * (under src_pgd) into the decorated page-table (under dst_pgd),
+ * descending to dpt_copy_pud_range() unless the copy stops at P4D
+ * level or the source entry is empty.
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+static int dpt_copy_p4d_range(struct dpt *dpt, pgd_t *dst_pgd, pgd_t *src_pgd,
+ unsigned long addr, unsigned long end,
+ enum page_table_level level)
+{
+ p4d_t *src_p4d, *dst_p4d;
+ unsigned long next;
+ int err;
+
+ /* Allocate the destination P4D page if it is not there yet. */
+ dst_p4d = dpt_p4d_alloc(dpt, dst_pgd, addr);
+ if (IS_ERR(dst_p4d))
+ return PTR_ERR(dst_p4d);
+
+ src_p4d = p4d_offset(src_pgd, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ /* Copy the whole P4D entry when stopping at this level or
+ * when the source entry is empty.
+ */
+ if (level == PGT_LEVEL_P4D || p4d_none(*src_p4d)) {
+ err = dpt_set_p4d(dpt, dst_p4d, *src_p4d);
+ if (err)
+ return err;
+ /* continue still runs the do-while increments below. */
+ continue;
+ }
+
+ err = dpt_copy_pud_range(dpt, dst_p4d, src_p4d, addr, next,
+ level);
+ if (err) {
+ pr_err("DPT %p: P4D error copying PUD addr=%lx next=%lx\n",
+ dpt, addr, next);
+ return err;
+ }
+
+ } while (dst_p4d++, src_p4d++, addr = next, addr < end);
+
+ return 0;
+}
+
+/*
+ * Top of the copy_pxx_range() walk: copy PGD entries covering
+ * [addr, end) from src_pagetable (the kernel page-table) into
+ * dst_pagetable (the dpt page-table). Called with dpt->lock held
+ * (see dpt_map_range()).
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+static int dpt_copy_pgd_range(struct dpt *dpt,
+ pgd_t *dst_pagetable, pgd_t *src_pagetable,
+ unsigned long addr, unsigned long end,
+ enum page_table_level level)
+{
+ pgd_t *src_pgd, *dst_pgd;
+ unsigned long next;
+ int err;
+
+ dst_pgd = pgd_offset_pgd(dst_pagetable, addr);
+ src_pgd = pgd_offset_pgd(src_pagetable, addr);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ /* Copy the whole PGD entry when stopping at this level or
+ * when the source entry is empty.
+ */
+ if (level == PGT_LEVEL_PGD || pgd_none(*src_pgd)) {
+ err = dpt_set_pgd(dpt, dst_pgd, *src_pgd);
+ if (err)
+ return err;
+ /* continue still runs the do-while increments below. */
+ continue;
+ }
+
+ err = dpt_copy_p4d_range(dpt, dst_pgd, src_pgd, addr, next,
+ level);
+ if (err) {
+ pr_err("DPT %p: PGD error copying P4D addr=%lx next=%lx\n",
+ dpt, addr, next);
+ return err;
+ }
+
+ } while (dst_pgd++, src_pgd++, addr = next, addr < end);
+
+ return 0;
+}
+
+/*
+ * Copy page table entries from the current page table (i.e. from the
+ * kernel page table) to the specified decorated page-table. The level
+ * parameter specifies the page-table level (PGD, P4D, PUD, PMD, PTE)
+ * at which the copy should be done.
+ *
+ * The VA range [ptr, ptr + size) does not need to be page-aligned.
+ * Return 0 on success, or a negative error code on failure.
+ */
+int dpt_map_range(struct dpt *dpt, void *ptr, size_t size,
+ enum page_table_level level)
+{
+ unsigned long addr = (unsigned long)ptr;
+ unsigned long end = addr + ((unsigned long)size);
+ unsigned long flags;
+ int err;
+
+ /* size is a size_t: print it with %zx, not %lx. */
+ pr_debug("DPT %p: MAP %px/%zx/%d\n", dpt, ptr, size, level);
+
+ /* Serialize against concurrent updates of the decorated page-table. */
+ spin_lock_irqsave(&dpt->lock, flags);
+ err = dpt_copy_pgd_range(dpt, dpt->pagetable, current->mm->pgd,
+ addr, end, level);
+ spin_unlock_irqrestore(&dpt->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL(dpt_map_range);
+
+/*
+ * Copy page-table PTE entries from the current page-table to the
+ * specified decorated page-table.
+ *
+ * Convenience wrapper around dpt_map_range() that always copies down
+ * to PTE level. NOTE(review): size is unsigned long here but size_t in
+ * dpt_map_range() — consider unifying the two signatures.
+ */
+int dpt_map(struct dpt *dpt, void *ptr, unsigned long size)
+{
+ return dpt_map_range(dpt, ptr, size, PGT_LEVEL_PTE);
+}
+EXPORT_SYMBOL(dpt_map);
+
/*
* dpt_create - allocate a page-table and create a corresponding
* decorated page-table. The page-table is allocated and aligned
--
2.18.2
next prev parent reply other threads:[~2020-05-04 15:00 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-04 14:57 [RFC v4][PATCH part-2 00/13] ASI - Part II (Decorated Page-Table) Alexandre Chartre
2020-05-04 14:57 ` [RFC v4][PATCH part-2 01/13] mm/x86: Introduce decorated page-table (dpt) Alexandre Chartre
2020-05-04 14:57 ` [RFC v4][PATCH part-2 02/13] mm/dpt: Track buffers allocated for a decorated page-table Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 03/13] mm/dpt: Add decorated page-table entry offset functions Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 04/13] mm/dpt: Add decorated page-table entry allocation functions Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 05/13] mm/dpt: Add decorated page-table entry set functions Alexandre Chartre
2020-05-04 14:58 ` Alexandre Chartre [this message]
2020-05-04 14:58 ` [RFC v4][PATCH part-2 07/13] mm/dpt: Helper functions to map module into a decorated page-table Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 08/13] mm/dpt: Keep track of VA ranges mapped in " Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 09/13] mm/dpt: Functions to clear decorated page-table entries for a VA range Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 10/13] mm/dpt: Function to copy page-table entries for percpu buffer Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 11/13] mm/dpt: Add decorated page-table remap function Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 12/13] mm/dpt: Handle decorated page-table mapped range leaks and overlaps Alexandre Chartre
2020-05-04 14:58 ` [RFC v4][PATCH part-2 13/13] mm/asi: Function to init decorated page-table with ASI core mappings Alexandre Chartre
2020-05-14 9:29 ` [RFC v4][PATCH part-2 00/13] ASI - Part II (Decorated Page-Table) Mike Rapoport
2020-05-14 11:42 ` Alexandre Chartre
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200504145810.11882-7-alexandre.chartre@oracle.com \
--to=alexandre.chartre@oracle.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=graf@amazon.de \
--cc=hpa@zytor.com \
--cc=jan.setjeeilers@oracle.com \
--cc=junaids@google.com \
--cc=konrad.wilk@oracle.com \
--cc=kuzuno@gmail.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=liran.alon@oracle.com \
--cc=luto@kernel.org \
--cc=mgross@linux.intel.com \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=rppt@linux.vnet.ibm.com \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).