From: Shawn Anastasio <sanastasio@raptorengineering.com>
To: xen-devel@lists.xenproject.org
Cc: Timothy Pearson <tpearson@raptorengineering.com>,
	Jan Beulich <jbeulich@suse.com>,
	Shawn Anastasio <sanastasio@raptorengineering.com>
Subject: [PATCH v2 7/7] xen/ppc: mm-radix: Allocate Partition and Process Tables at runtime
Date: Thu, 14 Dec 2023 20:44:02 -0600
Message-ID: <f49a4a372a9f82e217fa56ba0dc3068deff32ef5.1702607884.git.sanastasio@raptorengineering.com>
In-Reply-To: <cover.1702607884.git.sanastasio@raptorengineering.com>

In the initial mm-radix implementation, the in-memory partition and
process tables required to configure the MMU were allocated statically
since the boot allocator was not yet available.

Now that it is, allocate these tables at runtime and bump the size of
the Process Table to its maximum supported value on POWER9. Also bump
the number of static LVL2/3 PD frames to tolerate cases where the boot
allocator returns an address outside the range covered by the LVL2/LVL3
frames already used for Xen.

Signed-off-by: Shawn Anastasio <sanastasio@raptorengineering.com>
---
Changes in v2:
  - Bump LVL2/3 PD count to 3 to avoid exhausting the pool when the
    boot allocator returns an address not covered by the existing
    frames.
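
For reference, the sizes and allocator requests implied by the new
constants work out as follows (a sketch assuming the port's 4 KiB
PAGE_SIZE, i.e. PAGE_SHIFT == 12; the names mirror the patch below):

    /*
     * PATB_SIZE = 1UL << 16  ->  64 KiB partition table
     * PRTB_SIZE = 1UL << 24  ->  16 MiB process table (POWER9 maximum)
     *
     * Both tables must be naturally aligned, as the previously static
     * arrays' __aligned(PATB_SIZE)/__aligned(PRTB_SIZE) attributes
     * reflect, so alloc_boot_pages(nr_pfns, pfn_align) is asked for:
     *   PRTB: nr_pfns = pfn_align = 1UL << (24 - 12) = 4096 frames
     *   PATB: nr_pfns = pfn_align = 1UL << (16 - 12) =   16 frames
     */

The LVL2/3 PD bump follows from the same geometry: assuming the usual
POWER9 radix 4K layout (9-bit LVL2-LVL4 indices), one LVL3 PD covers
1 GiB, so tables allocated in a different 1 GiB region than Xen itself
consume a third frame from the shared LVL2/LVL3 pool.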

 xen/arch/ppc/mm-radix.c | 169 +++++++++++++++++++++++-----------------
 1 file changed, 97 insertions(+), 72 deletions(-)
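
The diff below factors the per-level walk out of setup_initial_mapping()
into a new map_page_initial() helper. For readers new to the radix
format, its walk corresponds to the following index extraction
(illustrative only, assuming the standard POWER9 4 KiB-page geometry of
13/9/9/9-bit levels over a 52-bit effective address; the port's
pt_entry() helper is the authoritative version):

    #include <stdint.h>

    /* Hypothetical helpers, not the port's API: the slot each level's
     * table uses for a given virtual address. */
    static inline unsigned int lvl1_idx(uint64_t va) { return (va >> 39) & 0x1fff; }
    static inline unsigned int lvl2_idx(uint64_t va) { return (va >> 30) & 0x1ff; }
    static inline unsigned int lvl3_idx(uint64_t va) { return (va >> 21) & 0x1ff; }
    static inline unsigned int lvl4_idx(uint64_t va) { return (va >> 12) & 0x1ff; }

Under this geometry each LVL4 PT maps 2 MiB, each LVL3 PD 1 GiB, and
each LVL2 PD 512 GiB, which is also why INITIAL_LVL4_PT_COUNT of 256
bounds the initial mappings at 512 MiB.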

diff --git a/xen/arch/ppc/mm-radix.c b/xen/arch/ppc/mm-radix.c
index de181cf6f1..e5604d8fb3 100644
--- a/xen/arch/ppc/mm-radix.c
+++ b/xen/arch/ppc/mm-radix.c
@@ -22,7 +22,7 @@ void enable_mmu(void);
 #endif

 #define INITIAL_LVL1_PD_COUNT      1
-#define INITIAL_LVL2_LVL3_PD_COUNT 2
+#define INITIAL_LVL2_LVL3_PD_COUNT 3
 #define INITIAL_LVL4_PT_COUNT      256

 static size_t __initdata initial_lvl1_pd_pool_used;
@@ -34,17 +34,13 @@ static struct lvl2_pd initial_lvl2_lvl3_pd_pool[INITIAL_LVL2_LVL3_PD_COUNT];
 static size_t __initdata initial_lvl4_pt_pool_used;
 static struct lvl4_pt initial_lvl4_pt_pool[INITIAL_LVL4_PT_COUNT];

-/* Only reserve minimum Partition and Process tables  */
 #define PATB_SIZE_LOG2 16 /* Only supported partition table size on POWER9 */
 #define PATB_SIZE      (1UL << PATB_SIZE_LOG2)
-#define PRTB_SIZE_LOG2 12
+#define PRTB_SIZE_LOG2 24 /* Maximum process table size on POWER9 */
 #define PRTB_SIZE      (1UL << PRTB_SIZE_LOG2)

-static struct patb_entry
-    __aligned(PATB_SIZE) initial_patb[PATB_SIZE / sizeof(struct patb_entry)];
-
-static struct prtb_entry
-    __aligned(PRTB_SIZE) initial_prtb[PRTB_SIZE / sizeof(struct prtb_entry)];
+static struct patb_entry *initial_patb;
+static struct prtb_entry *initial_prtb;

 static __init struct lvl1_pd *lvl1_pd_pool_alloc(void)
 {
@@ -86,6 +82,62 @@ static __init struct lvl4_pt *lvl4_pt_pool_alloc(void)
     return &initial_lvl4_pt_pool[initial_lvl4_pt_pool_used++];
 }

+static void __init map_page_initial(struct lvl1_pd *lvl1, vaddr_t virt,
+                                    paddr_t phys, unsigned long flags)
+{
+    struct lvl2_pd *lvl2;
+    struct lvl3_pd *lvl3;
+    struct lvl4_pt *lvl4;
+    pde_t *pde;
+    pte_t *pte;
+
+    /* Allocate LVL 2 PD if necessary */
+    pde = pt_entry(lvl1, virt);
+    if ( !pde_is_valid(*pde) )
+    {
+        lvl2 = lvl2_pd_pool_alloc();
+        *pde = paddr_to_pde(__pa(lvl2), PDE_VALID,
+                            XEN_PT_ENTRIES_LOG2_LVL_2);
+    }
+    else
+        lvl2 = __va(pde_to_paddr(*pde));
+
+    /* Allocate LVL 3 PD if necessary */
+    pde = pt_entry(lvl2, virt);
+    if ( !pde_is_valid(*pde) )
+    {
+        lvl3 = lvl3_pd_pool_alloc();
+        *pde = paddr_to_pde(__pa(lvl3), PDE_VALID,
+                            XEN_PT_ENTRIES_LOG2_LVL_3);
+    }
+    else
+        lvl3 = __va(pde_to_paddr(*pde));
+
+    /* Allocate LVL 4 PT if necessary */
+    pde = pt_entry(lvl3, virt);
+    if ( !pde_is_valid(*pde) )
+    {
+        lvl4 = lvl4_pt_pool_alloc();
+        *pde = paddr_to_pde(__pa(lvl4), PDE_VALID,
+                            XEN_PT_ENTRIES_LOG2_LVL_4);
+    }
+    else
+        lvl4 = __va(pde_to_paddr(*pde));
+
+    /* Finally, create PTE in LVL 4 PT */
+    pte = pt_entry(lvl4, virt);
+    if ( !pte_is_valid(*pte) )
+    {
+        radix_dprintk("%016lx being mapped to %016lx\n", phys, virt);
+        *pte = paddr_to_pte(phys, flags);
+    }
+    else
+    {
+        early_printk("BUG: Tried to create PTE for already-mapped page!");
+        die();
+    }
+}
+
 static void __init setup_initial_mapping(struct lvl1_pd *lvl1,
                                          vaddr_t map_start,
                                          vaddr_t map_end,
@@ -105,80 +157,43 @@ static void __init setup_initial_mapping(struct lvl1_pd *lvl1,
         die();
     }

+    /* Identity map Xen itself */
     for ( page_addr = map_start; page_addr < map_end; page_addr += PAGE_SIZE )
     {
-        struct lvl2_pd *lvl2;
-        struct lvl3_pd *lvl3;
-        struct lvl4_pt *lvl4;
-        pde_t *pde;
-        pte_t *pte;
-
-        /* Allocate LVL 2 PD if necessary */
-        pde = pt_entry(lvl1, page_addr);
-        if ( !pde_is_valid(*pde) )
-        {
-            lvl2 = lvl2_pd_pool_alloc();
-            *pde = paddr_to_pde(__pa(lvl2), PDE_VALID,
-                                XEN_PT_ENTRIES_LOG2_LVL_2);
-        }
-        else
-            lvl2 = __va(pde_to_paddr(*pde));
+        unsigned long flags;

-        /* Allocate LVL 3 PD if necessary */
-        pde = pt_entry(lvl2, page_addr);
-        if ( !pde_is_valid(*pde) )
+        if ( is_kernel_text(page_addr) || is_kernel_inittext(page_addr) )
         {
-            lvl3 = lvl3_pd_pool_alloc();
-            *pde = paddr_to_pde(__pa(lvl3), PDE_VALID,
-                                XEN_PT_ENTRIES_LOG2_LVL_3);
+            radix_dprintk("%016lx being marked as TEXT (RX)\n", page_addr);
+            flags = PTE_XEN_RX;
         }
-        else
-            lvl3 = __va(pde_to_paddr(*pde));
-
-        /* Allocate LVL 4 PT if necessary */
-        pde = pt_entry(lvl3, page_addr);
-        if ( !pde_is_valid(*pde) )
-        {
-            lvl4 = lvl4_pt_pool_alloc();
-            *pde = paddr_to_pde(__pa(lvl4), PDE_VALID,
-                                XEN_PT_ENTRIES_LOG2_LVL_4);
-        }
-        else
-            lvl4 = __va(pde_to_paddr(*pde));
-
-        /* Finally, create PTE in LVL 4 PT */
-        pte = pt_entry(lvl4, page_addr);
-        if ( !pte_is_valid(*pte) )
+        else if ( is_kernel_rodata(page_addr) )
         {
-            unsigned long paddr = (page_addr - map_start) + phys_base;
-            unsigned long flags;
-
-            radix_dprintk("%016lx being mapped to %016lx\n", paddr, page_addr);
-            if ( is_kernel_text(page_addr) || is_kernel_inittext(page_addr) )
-            {
-                radix_dprintk("%016lx being marked as TEXT (RX)\n", page_addr);
-                flags = PTE_XEN_RX;
-            }
-            else if ( is_kernel_rodata(page_addr) )
-            {
-                radix_dprintk("%016lx being marked as RODATA (RO)\n", page_addr);
-                flags = PTE_XEN_RO;
-            }
-            else
-            {
-                radix_dprintk("%016lx being marked as DEFAULT (RW)\n", page_addr);
-                flags = PTE_XEN_RW;
-            }
-
-            *pte = paddr_to_pte(paddr, flags);
-            radix_dprintk("%016lx is the result of PTE map\n",
-                paddr_to_pte(paddr, flags).pte);
+            radix_dprintk("%016lx being marked as RODATA (RO)\n", page_addr);
+            flags = PTE_XEN_RO;
         }
         else
         {
-            early_printk("BUG: Tried to create PTE for already-mapped page!");
-            die();
+            radix_dprintk("%016lx being marked as DEFAULT (RW)\n", page_addr);
+            flags = PTE_XEN_RW;
         }
+
+        map_page_initial(lvl1, page_addr, (page_addr - map_start) + phys_base, flags);
+    }
+
+    /* Map runtime-allocated PATB, PRTB */
+    for ( page_addr = (uint64_t)initial_patb;
+          page_addr < (uint64_t)initial_patb + PATB_SIZE;
+          page_addr += PAGE_SIZE )
+    {
+        map_page_initial(lvl1, page_addr, __pa(page_addr), PTE_XEN_RW);
+    }
+
+    for ( page_addr = (uint64_t)initial_prtb;
+          page_addr < (uint64_t)initial_prtb + PRTB_SIZE;
+          page_addr += PAGE_SIZE )
+    {
+        map_page_initial(lvl1, page_addr, __pa(page_addr), PTE_XEN_RW);
     }
 }

@@ -210,6 +225,16 @@ void __init setup_initial_pagetables(void)
 {
     struct lvl1_pd *root = lvl1_pd_pool_alloc();
     unsigned long lpcr;
+    mfn_t patb_mfn, prtb_mfn;
+
+    /* Allocate mfns for in-memory tables using the boot allocator */
+    prtb_mfn = alloc_boot_pages(PRTB_SIZE / PAGE_SIZE,
+                                1UL << (PRTB_SIZE_LOG2 - PAGE_SHIFT));
+    patb_mfn = alloc_boot_pages(PATB_SIZE / PAGE_SIZE,
+                                1UL << (PATB_SIZE_LOG2 - PAGE_SHIFT));
+
+    initial_patb = __va(mfn_to_maddr(patb_mfn));
+    initial_prtb = __va(mfn_to_maddr(prtb_mfn));

     setup_initial_mapping(root, (vaddr_t)_start, (vaddr_t)_end, __pa(_start));

--
2.30.2


