All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] p2m: use defines for page sizes
@ 2011-08-25 12:44 Christoph Egger
  2011-08-26 12:03 ` Tim Deegan
  0 siblings, 1 reply; 2+ messages in thread
From: Christoph Egger @ 2011-08-25 12:44 UTC (permalink / raw)
  To: xen-devel, Tim Deegan

[-- Attachment #1: Type: text/plain, Size: 383 bytes --]


Use defines for page sizes instead of hardcoding the value.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>

-- 
---to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Einsteinring 24, 85689 Dornach b. Muenchen
Geschaeftsfuehrer: Alberto Bozzo, Andrew Bowd
Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
Registergericht Muenchen, HRB Nr. 43632

[-- Attachment #2: xen_pageorder.diff --]
[-- Type: text/plain, Size: 12805 bytes --]

diff -r 301b4561d128 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c	Tue Aug 23 14:34:15 2011 +0200
+++ b/xen/arch/x86/mm/p2m-pod.c	Thu Aug 25 14:29:18 2011 +0200
@@ -112,11 +112,11 @@ p2m_pod_cache_add(struct p2m_domain *p2m
     /* Then add the first one to the appropriate populate-on-demand list */
     switch(order)
     {
-    case 9:
+    case PAGE_ORDER_2M:
         page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
         p2m->pod.count += 1 << order;
         break;
-    case 0:
+    case PAGE_ORDER_4K:
         page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
         p2m->pod.count += 1;
         break;
@@ -143,11 +143,11 @@ static struct page_info * p2m_pod_cache_
     struct page_info *p = NULL;
     int i;
 
-    if ( order == 9 && page_list_empty(&p2m->pod.super) )
+    if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
     {
         return NULL;
     }
-    else if ( order == 0 && page_list_empty(&p2m->pod.single) )
+    else if ( order == PAGE_ORDER_4K && page_list_empty(&p2m->pod.single) )
     {
         unsigned long mfn;
         struct page_info *q;
@@ -168,12 +168,12 @@ static struct page_info * p2m_pod_cache_
 
     switch ( order )
     {
-    case 9:
+    case PAGE_ORDER_2M:
         BUG_ON( page_list_empty(&p2m->pod.super) );
         p = page_list_remove_head(&p2m->pod.super);
         p2m->pod.count -= 1 << order; /* Lock: page_alloc */
         break;
-    case 0:
+    case PAGE_ORDER_4K:
         BUG_ON( page_list_empty(&p2m->pod.single) );
         p = page_list_remove_head(&p2m->pod.single);
         p2m->pod.count -= 1;
@@ -206,17 +206,17 @@ p2m_pod_set_cache_target(struct p2m_doma
         int order;
 
         if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES )
-            order = 9;
+            order = PAGE_ORDER_2M;
         else
-            order = 0;
+            order = PAGE_ORDER_4K;
     retry:
-        page = alloc_domheap_pages(d, order, 0);
+        page = alloc_domheap_pages(d, order, PAGE_ORDER_4K);
         if ( unlikely(page == NULL) )
         {
-            if ( order == 9 )
+            if ( order == PAGE_ORDER_2M )
             {
                 /* If we can't allocate a superpage, try singleton pages */
-                order = 0;
+                order = PAGE_ORDER_4K;
                 goto retry;
             }   
             
@@ -249,9 +249,9 @@ p2m_pod_set_cache_target(struct p2m_doma
 
         if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES
              && !page_list_empty(&p2m->pod.super) )
-            order = 9;
+            order = PAGE_ORDER_2M;
         else
-            order = 0;
+            order = PAGE_ORDER_4K;
 
         page = p2m_pod_cache_get(p2m, order);
 
@@ -468,12 +468,12 @@ p2m_pod_offline_or_broken_replace(struct
 
     free_domheap_page(p);
 
-    p = alloc_domheap_page(d, 0);
+    p = alloc_domheap_page(d, PAGE_ORDER_4K);
     if ( unlikely(!p) )
         return;
 
     p2m_lock(p2m);
-    p2m_pod_cache_add(p2m, p, 0);
+    p2m_pod_cache_add(p2m, p, PAGE_ORDER_4K);
     p2m_unlock(p2m);
     return;
 }
@@ -688,7 +688,7 @@ p2m_pod_zero_check_superpage(struct p2m_
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    set_p2m_entry(p2m, gfn, _mfn(0), 9,
+    set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -739,7 +739,7 @@ p2m_pod_zero_check_superpage(struct p2m_
 
     /* Finally!  We've passed all the checks, and can add the mfn superpage
      * back on the PoD cache, and account for the new p2m PoD entries */
-    p2m_pod_cache_add(p2m, mfn_to_page(mfn0), 9);
+    p2m_pod_cache_add(p2m, mfn_to_page(mfn0), PAGE_ORDER_2M);
     p2m->pod.entry_count += SUPERPAGE_PAGES;
 
 out_reset:
@@ -800,7 +800,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        set_p2m_entry(p2m, gfns[i], _mfn(0), 0,
+        set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
@@ -810,7 +810,8 @@ p2m_pod_zero_check(struct p2m_domain *p2
             unmap_domain_page(map[i]);
             map[i] = NULL;
 
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                types[i], p2m->default_access);
 
             continue;
         }
@@ -832,7 +833,8 @@ p2m_pod_zero_check(struct p2m_domain *p2
          * check timing.  */
         if ( j < PAGE_SIZE/sizeof(*map[i]) )
         {
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                types[i], p2m->default_access);
         }
         else
         {
@@ -852,7 +854,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
             }
 
             /* Add to cache, and account for the new p2m PoD entry */
-            p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), 0);
+            p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), PAGE_ORDER_4K);
             p2m->pod.entry_count++;
         }
     }
@@ -867,7 +869,7 @@ p2m_pod_emergency_sweep_super(struct p2m
 
     if ( p2m->pod.reclaim_super == 0 )
     {
-        p2m->pod.reclaim_super = (p2m->pod.max_guest>>9)<<9;
+        p2m->pod.reclaim_super = (p2m->pod.max_guest>>PAGE_ORDER_2M)<<PAGE_ORDER_2M;
         p2m->pod.reclaim_super -= SUPERPAGE_PAGES;
     }
     
@@ -956,7 +958,7 @@ p2m_pod_demand_populate(struct p2m_domai
 
     /* Because PoD does not have cache list for 1GB pages, it has to remap
      * 1GB region to 2MB chunks for a retry. */
-    if ( order == 18 )
+    if ( order == PAGE_ORDER_1G )
     {
         gfn_aligned = (gfn >> order) << order;
         /* Note that we are supposed to call set_p2m_entry() 512 times to 
@@ -964,7 +966,7 @@ p2m_pod_demand_populate(struct p2m_domai
          * set_p2m_entry() should automatically shatter the 1GB page into 
          * 512 2MB pages. The rest of 511 calls are unnecessary.
          */
-        set_p2m_entry(p2m, gfn_aligned, _mfn(0), 9,
+        set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
                       p2m_populate_on_demand, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
@@ -979,12 +981,12 @@ p2m_pod_demand_populate(struct p2m_domai
     {
 
         /* If we're low, start a sweep */
-        if ( order == 9 && page_list_empty(&p2m->pod.super) )
+        if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
             p2m_pod_emergency_sweep_super(p2m);
 
         if ( page_list_empty(&p2m->pod.single) &&
-             ( ( order == 0 )
-               || (order == 9 && page_list_empty(&p2m->pod.super) ) ) )
+             ( ( order == PAGE_ORDER_4K )
+               || (order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) ) ) )
             p2m_pod_emergency_sweep(p2m);
     }
 
@@ -1046,13 +1048,13 @@ out_of_memory:
 out_fail:
     return -1;
 remap_and_retry:
-    BUG_ON(order != 9);
+    BUG_ON(order != PAGE_ORDER_2M);
     spin_unlock(&d->page_alloc_lock);
 
     /* Remap this 2-meg region in singleton chunks */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), 0,
+        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
diff -r 301b4561d128 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c	Tue Aug 23 14:34:15 2011 +0200
+++ b/xen/arch/x86/mm/p2m-pt.c	Thu Aug 25 14:29:18 2011 +0200
@@ -64,7 +64,7 @@ static unsigned long p2m_type_to_flags(p
      */
     flags = (unsigned long)(t & 0x7f) << 12;
 #else
-    flags = (t & 0x7UL) << 9;
+    flags = (t & 0x7UL) << PAGE_ORDER_2M;
 #endif
 
 #ifndef __x86_64__
@@ -121,12 +121,12 @@ static void
 p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 {
     /* End if the entry is a leaf entry. */
-    if ( page_order == 0
+    if ( page_order == PAGE_ORDER_4K 
          || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
          || (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
         return;
 
-    if ( page_order > 9 )
+    if ( page_order > PAGE_ORDER_2M )
     {
         l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
         for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -323,7 +323,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
     /*
      * Try to allocate 1GB page table if this feature is supported.
      */
-    if ( page_order == 18 )
+    if ( page_order == PAGE_ORDER_1G )
     {
         l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -373,7 +373,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
                               PGT_l2_page_table) )
         goto out;
 
-    if ( page_order == 0 )
+    if ( page_order == PAGE_ORDER_4K )
     {
         if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                              L2_PAGETABLE_SHIFT - PAGE_SHIFT,
@@ -399,7 +399,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
-    else if ( page_order == 9 )
+    else if ( page_order == PAGE_ORDER_2M )
     {
         l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -541,7 +541,7 @@ pod_retry_l3:
             /* The read has succeeded, so we know that mapping exists */
             if ( q != p2m_query )
             {
-                if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                     goto pod_retry_l3;
                 p2mt = p2m_invalid;
                 printk("%s: Allocate 1GB failed!\n", __func__);
@@ -735,7 +735,7 @@ pod_retry_l3:
             {
                 if ( q != p2m_query )
                 {
-                    if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+                    if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                         goto pod_retry_l3;
                 }
                 else
@@ -771,7 +771,7 @@ pod_retry_l2:
         {
             if ( q != p2m_query ) {
                 if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                 (l1_pgentry_t *)l2e, 9, q) )
+                                                 (l1_pgentry_t *)l2e, PAGE_ORDER_2M, q) )
                     goto pod_retry_l2;
             } else
                 *t = p2m_populate_on_demand;
@@ -803,7 +803,7 @@ pod_retry_l1:
         {
             if ( q != p2m_query ) {
                 if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                 (l1_pgentry_t *)l1e, 0, q) )
+                                                 (l1_pgentry_t *)l1e, PAGE_ORDER_4K, q) )
                     goto pod_retry_l1;
             } else
                 *t = p2m_populate_on_demand;
diff -r 301b4561d128 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Tue Aug 23 14:34:15 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c	Thu Aug 25 14:29:18 2011 +0200
@@ -149,10 +149,10 @@ int set_p2m_entry(struct p2m_domain *p2m
     while ( todo )
     {
         if ( hap_enabled(d) )
-            order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << 18) - 1)) == 0) &&
-                      hvm_hap_has_1gb(d) && opt_hap_1gb ) ? 18 :
-                      ((((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) &&
-                      hvm_hap_has_2mb(d) && opt_hap_2mb) ? 9 : 0;
+            order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_1G) - 1)) == 0) &&
+                      hvm_hap_has_1gb(d) && opt_hap_1gb ) ? PAGE_ORDER_1G :
+                      ((((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_2M) - 1)) == 0) &&
+                      hvm_hap_has_2mb(d) && opt_hap_2mb) ? PAGE_ORDER_2M : PAGE_ORDER_4K;
         else
             order = 0;
 
diff -r 301b4561d128 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h	Tue Aug 23 14:34:15 2011 +0200
+++ b/xen/include/asm-x86/page.h	Thu Aug 25 14:29:18 2011 +0200
@@ -13,6 +13,10 @@
 #define PAGE_MASK           (~(PAGE_SIZE-1))
 #define PAGE_FLAG_MASK      (~0)
 
+#define PAGE_ORDER_4K       0
+#define PAGE_ORDER_2M       9
+#define PAGE_ORDER_1G       18
+
 #ifndef __ASSEMBLY__
 # include <asm/types.h>
 # include <xen/lib.h>

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH] p2m: use defines for page sizes
  2011-08-25 12:44 [PATCH] p2m: use defines for page sizes Christoph Egger
@ 2011-08-26 12:03 ` Tim Deegan
  0 siblings, 0 replies; 2+ messages in thread
From: Tim Deegan @ 2011-08-26 12:03 UTC (permalink / raw)
  To: Christoph Egger; +Cc: xen-devel

At 14:44 +0200 on 25 Aug (1314283453), Christoph Egger wrote:
> 
> Use defines for page sizes instead of hardcoding the value.
> 
> Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>

Applied, thanks, except for this hunk, where that's not what 9 means: 

> --- a/xen/arch/x86/mm/p2m-pt.c	Tue Aug 23 14:34:15 2011 +0200
> +++ b/xen/arch/x86/mm/p2m-pt.c	Thu Aug 25 14:29:18 2011 +0200
> @@ -64,7 +64,7 @@ static unsigned long p2m_type_to_flags(p
>       */
>      flags = (unsigned long)(t & 0x7f) << 12;
>  #else
> -    flags = (t & 0x7UL) << 9;
> +    flags = (t & 0x7UL) << PAGE_ORDER_2M;
>  #endif
>  
>  #ifndef __x86_64__

Cheers,

Tim.

-- 
Tim Deegan <tim@xen.org>
Principal Software Engineer, Xen Platform Team
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2011-08-26 12:03 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-08-25 12:44 [PATCH] p2m: use defines for page sizes Christoph Egger
2011-08-26 12:03 ` Tim Deegan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.