All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] minios: don't rely on specific page table allocation scheme
@ 2015-11-20 13:52 Juergen Gross
  2015-11-20 14:09 ` Wei Liu
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Juergen Gross @ 2015-11-20 13:52 UTC (permalink / raw)
  To: stefano.stabellini, samuel.thibault, xen-devel, Ian.Campbell,
	ian.jackson, wei.liu2
  Cc: Juergen Gross

Today mini-os is making assumptions how the page tables it is started
with are being allocated. Especially it is using the number of page
table frames to calculate which is the first unmapped pfn.

Instead of relying on page table number assumptions just look into the
page tables to find the first pfn not already mapped.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
Changes in V2:
- remove need_pt_frame() as it simplifies code (suggested by Wei Liu)

---
 arch/x86/mm.c         | 81 ++++++++++-----------------------------------------
 include/x86/arch_mm.h |  7 -----
 2 files changed, 15 insertions(+), 73 deletions(-)

diff --git a/arch/x86/mm.c b/arch/x86/mm.c
index 9c6d1b8..b828efc 100644
--- a/arch/x86/mm.c
+++ b/arch/x86/mm.c
@@ -132,61 +132,6 @@ static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
 }
 
 /*
- * Checks if a pagetable frame is needed at 'level' to map a given
- * address. Note, this function is specific to the initial page table
- * building.
- */
-static int need_pt_frame(unsigned long va, int level)
-{
-    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
-#if defined(__x86_64__)
-    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
-#else
-    unsigned long hyp_virt_end = 0xffffffff;
-#endif
-
-    /* In general frames will _not_ be needed if they were already
-       allocated to map the hypervisor into our VA space */
-#if defined(__x86_64__)
-    if ( level == L3_FRAME )
-    {
-        if ( l4_table_offset(va) >= 
-             l4_table_offset(hyp_virt_start) &&
-             l4_table_offset(va) <= 
-             l4_table_offset(hyp_virt_end))
-            return 0;
-        return 1;
-    } 
-    else
-#endif
-
-    if ( level == L2_FRAME )
-    {
-#if defined(__x86_64__)
-        if ( l4_table_offset(va) >= 
-             l4_table_offset(hyp_virt_start) &&
-             l4_table_offset(va) <= 
-             l4_table_offset(hyp_virt_end))
-#endif
-            if ( l3_table_offset(va) >= 
-                 l3_table_offset(hyp_virt_start) &&
-                 l3_table_offset(va) <= 
-                 l3_table_offset(hyp_virt_end))
-                return 0;
-
-        return 1;
-    } 
-    else 
-        /* Always need l1 frames */
-        if ( level == L1_FRAME )
-            return 1;
-
-    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", 
-           level, hyp_virt_start, hyp_virt_end);
-    return -1;
-}
-
-/*
  * Build the initial pagetable.
  */
 static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
@@ -200,8 +145,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
     int count = 0;
     int rc;
 
-    pfn_to_map = 
-        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
+    pfn_to_map = (*start_pfn + L1_PAGETABLE_ENTRIES - 1) &
+                 ~(L1_PAGETABLE_ENTRIES - 1);
 
     if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
     {
@@ -229,9 +174,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
 #if defined(__x86_64__)
         offset = l4_table_offset(start_address);
         /* Need new L3 pt frame */
-        if ( !(start_address & L3_MASK) )
-            if ( need_pt_frame(start_address, L3_FRAME) ) 
-                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
+        if ( !(tab[offset] & _PAGE_PRESENT) )
+            new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
 
         page = tab[offset];
         pt_mfn = pte_to_mfn(page);
@@ -239,18 +183,23 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
 #endif
         offset = l3_table_offset(start_address);
         /* Need new L2 pt frame */
-        if ( !(start_address & L2_MASK) )
-            if ( need_pt_frame(start_address, L2_FRAME) )
-                new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
+        if ( !(tab[offset] & _PAGE_PRESENT) )
+            new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
 
         page = tab[offset];
         pt_mfn = pte_to_mfn(page);
         tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
         offset = l2_table_offset(start_address);        
         /* Need new L1 pt frame */
-        if ( !(start_address & L1_MASK) )
-            if ( need_pt_frame(start_address, L1_FRAME) )
-                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
+        if ( !(tab[offset] & _PAGE_PRESENT) )
+            new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
+        else if ( !(start_address & L1_MASK) )
+        {
+            /* Already mapped, skip this L1 entry. */
+            start_address += L1_PAGETABLE_ENTRIES << PAGE_SHIFT;
+            pfn_to_map += L1_PAGETABLE_ENTRIES;
+            continue;
+        }
 
         page = tab[offset];
         pt_mfn = pte_to_mfn(page);
diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
index 23cfca7..58f29fc 100644
--- a/include/x86/arch_mm.h
+++ b/include/x86/arch_mm.h
@@ -56,12 +56,6 @@
 
 #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
 
-/*
- * If starting from virtual address greater than 0xc0000000,
- * this value will be 2 to account for final mid-level page
- * directory which is always mapped in at this location.
- */
-#define NOT_L1_FRAMES           3
 #define PRIpte "016llx"
 #ifndef __ASSEMBLY__
 typedef uint64_t pgentry_t;
@@ -87,7 +81,6 @@ typedef uint64_t pgentry_t;
 #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
 #define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
 
-#define NOT_L1_FRAMES           3
 #define PRIpte "016lx"
 #ifndef __ASSEMBLY__
 typedef unsigned long pgentry_t;
-- 
2.6.2

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] minios: don't rely on specific page table allocation scheme
  2015-11-20 13:52 [PATCH v2] minios: don't rely on specific page table allocation scheme Juergen Gross
@ 2015-11-20 14:09 ` Wei Liu
  2015-11-20 14:18 ` Ian Campbell
  2015-11-20 14:35 ` Samuel Thibault
  2 siblings, 0 replies; 5+ messages in thread
From: Wei Liu @ 2015-11-20 14:09 UTC (permalink / raw)
  To: Juergen Gross
  Cc: wei.liu2, Ian.Campbell, stefano.stabellini, ian.jackson,
	xen-devel, samuel.thibault

On Fri, Nov 20, 2015 at 02:52:57PM +0100, Juergen Gross wrote:
> Today mini-os is making assumptions how the page tables it is started
> with are being allocated. Especially it is using the number of page
> table frames to calculate which is the first unmapped pfn.
> 
> Instead of relying on page table number assumptions just look into the
> page tables to find the first pfn not already mapped.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> Changes in V2:
> - remove need_pt_frame() as it simplifies code (suggested by Wei Liu)
> 
> ---
>  arch/x86/mm.c         | 81 ++++++++++-----------------------------------------
>  include/x86/arch_mm.h |  7 -----
>  2 files changed, 15 insertions(+), 73 deletions(-)
> 
> diff --git a/arch/x86/mm.c b/arch/x86/mm.c
> index 9c6d1b8..b828efc 100644
> --- a/arch/x86/mm.c
> +++ b/arch/x86/mm.c
> @@ -132,61 +132,6 @@ static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
>  }
>  
>  /*
> - * Checks if a pagetable frame is needed at 'level' to map a given
> - * address. Note, this function is specific to the initial page table
> - * building.
> - */
> -static int need_pt_frame(unsigned long va, int level)
> -{
> -    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
> -#if defined(__x86_64__)
> -    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
> -#else
> -    unsigned long hyp_virt_end = 0xffffffff;
> -#endif
> -
> -    /* In general frames will _not_ be needed if they were already
> -       allocated to map the hypervisor into our VA space */
> -#if defined(__x86_64__)
> -    if ( level == L3_FRAME )
> -    {
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -            return 0;
> -        return 1;
> -    } 
> -    else
> -#endif
> -
> -    if ( level == L2_FRAME )
> -    {
> -#if defined(__x86_64__)
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -#endif
> -            if ( l3_table_offset(va) >= 
> -                 l3_table_offset(hyp_virt_start) &&
> -                 l3_table_offset(va) <= 
> -                 l3_table_offset(hyp_virt_end))
> -                return 0;
> -
> -        return 1;
> -    } 
> -    else 
> -        /* Always need l1 frames */
> -        if ( level == L1_FRAME )
> -            return 1;
> -
> -    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", 
> -           level, hyp_virt_start, hyp_virt_end);
> -    return -1;
> -}
> -
> -/*

Strangely, git am rejects the above hunk. I manually fixed that up in my
tree and tested again.

All my tests in previous email passed.

Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Tested-by: Wei Liu <wei.liu2@citrix.com>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] minios: don't rely on specific page table allocation scheme
  2015-11-20 13:52 [PATCH v2] minios: don't rely on specific page table allocation scheme Juergen Gross
  2015-11-20 14:09 ` Wei Liu
@ 2015-11-20 14:18 ` Ian Campbell
  2015-11-20 14:35 ` Samuel Thibault
  2 siblings, 0 replies; 5+ messages in thread
From: Ian Campbell @ 2015-11-20 14:18 UTC (permalink / raw)
  To: Juergen Gross, stefano.stabellini, samuel.thibault, xen-devel,
	ian.jackson, wei.liu2
  Cc: minios-devel

On Fri, 2015-11-20 at 14:52 +0100, Juergen Gross wrote:

Copying minios-devel.

I'll send out a patch to MAINTAINERS to add this in a second.

> Today mini-os is making assumptions how the page tables it is started
> with are being allocated. Especially it is using the number of page
> table frames to calculate which is the first unmapped pfn.
> 
> Instead of relying on page table number assumptions just look into the
> page tables to find the first pfn not already mapped.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> Changes in V2:
> - remove need_pt_frame() as it simplifies code (suggested by Wei Liu)
> 
> ---
>  arch/x86/mm.c         | 81 ++++++++++-----------------------------------
> ------
>  include/x86/arch_mm.h |  7 -----
>  2 files changed, 15 insertions(+), 73 deletions(-)
> 
> diff --git a/arch/x86/mm.c b/arch/x86/mm.c
> index 9c6d1b8..b828efc 100644
> --- a/arch/x86/mm.c
> +++ b/arch/x86/mm.c
> @@ -132,61 +132,6 @@ static void new_pt_frame(unsigned long *pt_pfn,
> unsigned long prev_l_mfn,
>  }
>  
>  /*
> - * Checks if a pagetable frame is needed at 'level' to map a given
> - * address. Note, this function is specific to the initial page table
> - * building.
> - */
> -static int need_pt_frame(unsigned long va, int level)
> -{
> -    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
> -#if defined(__x86_64__)
> -    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
> -#else
> -    unsigned long hyp_virt_end = 0xffffffff;
> -#endif
> -
> -    /* In general frames will _not_ be needed if they were already
> -       allocated to map the hypervisor into our VA space */
> -#if defined(__x86_64__)
> -    if ( level == L3_FRAME )
> -    {
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -            return 0;
> -        return 1;
> -    } 
> -    else
> -#endif
> -
> -    if ( level == L2_FRAME )
> -    {
> -#if defined(__x86_64__)
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -#endif
> -            if ( l3_table_offset(va) >= 
> -                 l3_table_offset(hyp_virt_start) &&
> -                 l3_table_offset(va) <= 
> -                 l3_table_offset(hyp_virt_end))
> -                return 0;
> -
> -        return 1;
> -    } 
> -    else 
> -        /* Always need l1 frames */
> -        if ( level == L1_FRAME )
> -            return 1;
> -
> -    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", 
> -           level, hyp_virt_start, hyp_virt_end);
> -    return -1;
> -}
> -
> -/*
>   * Build the initial pagetable.
>   */
>  static void build_pagetable(unsigned long *start_pfn, unsigned long
> *max_pfn)
> @@ -200,8 +145,8 @@ static void build_pagetable(unsigned long *start_pfn,
> unsigned long *max_pfn)
>      int count = 0;
>      int rc;
>  
> -    pfn_to_map = 
> -        (start_info.nr_pt_frames - NOT_L1_FRAMES) *
> L1_PAGETABLE_ENTRIES;
> +    pfn_to_map = (*start_pfn + L1_PAGETABLE_ENTRIES - 1) &
> +                 ~(L1_PAGETABLE_ENTRIES - 1);
>  
>      if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
>      {
> @@ -229,9 +174,8 @@ static void build_pagetable(unsigned long *start_pfn,
> unsigned long *max_pfn)
>  #if defined(__x86_64__)
>          offset = l4_table_offset(start_address);
>          /* Need new L3 pt frame */
> -        if ( !(start_address & L3_MASK) )
> -            if ( need_pt_frame(start_address, L3_FRAME) ) 
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
> @@ -239,18 +183,23 @@ static void build_pagetable(unsigned long
> *start_pfn, unsigned long *max_pfn)
>  #endif
>          offset = l3_table_offset(start_address);
>          /* Need new L2 pt frame */
> -        if ( !(start_address & L2_MASK) )
> -            if ( need_pt_frame(start_address, L2_FRAME) )
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
>          tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>          offset = l2_table_offset(start_address);        
>          /* Need new L1 pt frame */
> -        if ( !(start_address & L1_MASK) )
> -            if ( need_pt_frame(start_address, L1_FRAME) )
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
> +        else if ( !(start_address & L1_MASK) )
> +        {
> +            /* Already mapped, skip this L1 entry. */
> +            start_address += L1_PAGETABLE_ENTRIES << PAGE_SHIFT;
> +            pfn_to_map += L1_PAGETABLE_ENTRIES;
> +            continue;
> +        }
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
> diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
> index 23cfca7..58f29fc 100644
> --- a/include/x86/arch_mm.h
> +++ b/include/x86/arch_mm.h
> @@ -56,12 +56,6 @@
>  
>  #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
>  
> -/*
> - * If starting from virtual address greater than 0xc0000000,
> - * this value will be 2 to account for final mid-level page
> - * directory which is always mapped in at this location.
> - */
> -#define NOT_L1_FRAMES           3
>  #define PRIpte "016llx"
>  #ifndef __ASSEMBLY__
>  typedef uint64_t pgentry_t;
> @@ -87,7 +81,6 @@ typedef uint64_t pgentry_t;
>  #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
>  #define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
>  
> -#define NOT_L1_FRAMES           3
>  #define PRIpte "016lx"
>  #ifndef __ASSEMBLY__
>  typedef unsigned long pgentry_t;
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] minios: don't rely on specific page table allocation scheme
  2015-11-20 13:52 [PATCH v2] minios: don't rely on specific page table allocation scheme Juergen Gross
  2015-11-20 14:09 ` Wei Liu
  2015-11-20 14:18 ` Ian Campbell
@ 2015-11-20 14:35 ` Samuel Thibault
  2015-11-20 14:47   ` Juergen Gross
  2 siblings, 1 reply; 5+ messages in thread
From: Samuel Thibault @ 2015-11-20 14:35 UTC (permalink / raw)
  To: Juergen Gross
  Cc: wei.liu2, xen-devel, ian.jackson, Ian.Campbell, stefano.stabellini

Hello,

Juergen Gross, on Fri 20 Nov 2015 14:52:57 +0100, wrote:
> Today mini-os is making assumptions how the page tables it is started
> with are being allocated. Especially it is using the number of page
> table frames to calculate which is the first unmapped pfn.
> 
> Instead of relying on page table number assumptions just look into the
> page tables to find the first pfn not already mapped.

I agree on the principle

> @@ -200,8 +145,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>      int count = 0;
>      int rc;
>  
> -    pfn_to_map = 
> -        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
> +    pfn_to_map = (*start_pfn + L1_PAGETABLE_ENTRIES - 1) &
> +                 ~(L1_PAGETABLE_ENTRIES - 1);

Why align up to L1_PAGETABLE_ENTRIES?  Because Xen always maps
a whole L1 pt frame?  I'd say assume even less by just taking
*start_pfn.  Yes, it's a (small) waste, but it makes the code less
obscure.  What do you think, Wei?

> @@ -229,9 +174,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>  #if defined(__x86_64__)
>          offset = l4_table_offset(start_address);
>          /* Need new L3 pt frame */
> -        if ( !(start_address & L3_MASK) )
> -            if ( need_pt_frame(start_address, L3_FRAME) ) 
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);

This replaces a test on the address with systematically reading the tab,
but that tab will most probably be in the L1d cache, so it is not much
more costly than the test, while the code becomes more readable.

>          pt_mfn = pte_to_mfn(page);
>          tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>          offset = l2_table_offset(start_address);        
>          /* Need new L1 pt frame */
> -        if ( !(start_address & L1_MASK) )
> -            if ( need_pt_frame(start_address, L1_FRAME) )
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
> +        else if ( !(start_address & L1_MASK) )
> +        {
> +            /* Already mapped, skip this L1 entry. */

Again, I'd say don't assume anything here, to keep the code simple at
the expense of a small waste.  That means instead...

> +            start_address += L1_PAGETABLE_ENTRIES << PAGE_SHIFT;
> +            pfn_to_map += L1_PAGETABLE_ENTRIES;
> +            continue;
> +        }
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>          offset = l1_table_offset(start_address);

... checking tab[offset] & _PAGE_PRESENT here before adding an MMU update.

In the end that'll make us re-read all the L1 page tables built by the
domain builder, but this looks cheap enough to me, while the code
becomes more readable and safer.

Samuel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] minios: don't rely on specific page table allocation scheme
  2015-11-20 14:35 ` Samuel Thibault
@ 2015-11-20 14:47   ` Juergen Gross
  0 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2015-11-20 14:47 UTC (permalink / raw)
  To: Samuel Thibault, stefano.stabellini, xen-devel, Ian.Campbell,
	ian.jackson, wei.liu2

On 20/11/15 15:35, Samuel Thibault wrote:
> Hello,
> 
> Juergen Gross, on Fri 20 Nov 2015 14:52:57 +0100, wrote:
>> Today mini-os is making assumptions how the page tables it is started
>> with are being allocated. Especially it is using the number of page
>> table frames to calculate which is the first unmapped pfn.
>>
>> Instead of relying on page table number assumptions just look into the
>> page tables to find the first pfn not already mapped.
> 
> I agree on the principle
> 
>> @@ -200,8 +145,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>>      int count = 0;
>>      int rc;
>>  
>> -    pfn_to_map = 
>> -        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
>> +    pfn_to_map = (*start_pfn + L1_PAGETABLE_ENTRIES - 1) &
>> +                 ~(L1_PAGETABLE_ENTRIES - 1);
> 
> Why aligning up on L1_PAGETABLE_ENTRIES.  Because Xen always maps
> a whole L1 pt frame?  I'd say just assume even less by just taking
> *start_pfn.  Yes, it's a (small) waste, but it makes the code less
> obscure.  What do you think Wei?
> 
>> @@ -229,9 +174,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>>  #if defined(__x86_64__)
>>          offset = l4_table_offset(start_address);
>>          /* Need new L3 pt frame */
>> -        if ( !(start_address & L3_MASK) )
>> -            if ( need_pt_frame(start_address, L3_FRAME) ) 
>> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
>> +        if ( !(tab[offset] & _PAGE_PRESENT) )
>> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
> 
> It replaces a test on the address with systematically reading the tab,
> but that tab will most probably be in the L1d cache, so not much more
> costly than the test while getting more readable code.
> 
>>          pt_mfn = pte_to_mfn(page);
>>          tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>>          offset = l2_table_offset(start_address);        
>>          /* Need new L1 pt frame */
>> -        if ( !(start_address & L1_MASK) )
>> -            if ( need_pt_frame(start_address, L1_FRAME) )
>> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
>> +        if ( !(tab[offset] & _PAGE_PRESENT) )
>> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
>> +        else if ( !(start_address & L1_MASK) )
>> +        {
>> +            /* Already mapped, skip this L1 entry. */
> 
> Again, I'd say not assume anything here, to keep the code simple at the
> expense of a waste. It means instead...
> 
>> +            start_address += L1_PAGETABLE_ENTRIES << PAGE_SHIFT;
>> +            pfn_to_map += L1_PAGETABLE_ENTRIES;
>> +            continue;
>> +        }
>>  
>>          page = tab[offset];
>>          pt_mfn = pte_to_mfn(page);
> tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>>          offset = l1_table_offset(start_address);
> 
> ... checking tab[offset] & _PAGE_PRESENT here before adding an MMU update.
> 
> In the end that'll make us re-read the whole L1 page tables built by the
> domain builder, but this looks cheap enough while being more readable
> code and safer to me.

Okay, I'll have a try.


Juergen

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2015-11-20 14:47 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-11-20 13:52 [PATCH v2] minios: don't rely on specific page table allocation scheme Juergen Gross
2015-11-20 14:09 ` Wei Liu
2015-11-20 14:18 ` Ian Campbell
2015-11-20 14:35 ` Samuel Thibault
2015-11-20 14:47   ` Juergen Gross

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.