* [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
@ 2015-09-28 14:30 Jan Beulich
  2015-09-29 12:20 ` Andrew Cooper
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Jan Beulich @ 2015-09-28 14:30 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Keir Fraser


Now that p2m->get_entry() always returns a valid order, utilize this
to accelerate some of the operations in PoD code. (There are two uses
of p2m->get_entry() left which don't easily lend themselves to this
optimization.)

Also adjust a few types as needed and remove stale comments from
p2m_pod_cache_add() (to avoid duplicating them yet another time).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Add a code comment in p2m_pod_decrease_reservation().

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -119,20 +119,23 @@ p2m_pod_cache_add(struct p2m_domain *p2m
 
     unlock_page_alloc(p2m);
 
-    /* Then add the first one to the appropriate populate-on-demand list */
-    switch(order)
+    /* Then add to the appropriate populate-on-demand list. */
+    switch ( order )
     {
+    case PAGE_ORDER_1G:
+        for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
+            page_list_add_tail(page + i, &p2m->pod.super);
+        break;
     case PAGE_ORDER_2M:
-        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
-        p2m->pod.count += 1 << order;
+        page_list_add_tail(page, &p2m->pod.super);
         break;
     case PAGE_ORDER_4K:
-        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
-        p2m->pod.count += 1;
+        page_list_add_tail(page, &p2m->pod.single);
         break;
     default:
         BUG();
     }
+    p2m->pod.count += 1 << order;
 
     return 0;
 }
@@ -502,11 +505,10 @@ p2m_pod_decrease_reservation(struct doma
                              unsigned int order)
 {
     int ret=0;
-    int i;
+    unsigned long i, n;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
-    int steal_for_cache;
-    int pod, nonpod, ram;
+    bool_t steal_for_cache;
+    long pod, nonpod, ram;
 
     gfn_lock(p2m, gpfn, order);
     pod_lock(p2m);    
@@ -525,21 +527,21 @@ recount:
     /* Figure out if we need to steal some freed memory for our cache */
     steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
 
-    /* FIXME: Add contiguous; query for PSE entries? */
-    for ( i=0; i<(1<<order); i++)
+    for ( i = 0; i < (1UL << order); i += n )
     {
         p2m_access_t a;
         p2m_type_t t;
+        unsigned int cur_order;
 
-        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
-
+        p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
+        n = 1UL << min(order, cur_order);
         if ( t == p2m_populate_on_demand )
-            pod++;
+            pod += n;
         else
         {
-            nonpod++;
+            nonpod += n;
             if ( p2m_is_ram(t) )
-                ram++;
+                ram += n;
         }
     }
 
@@ -574,41 +576,53 @@ recount:
      * + There are PoD entries to handle, or
      * + There is ram left, and we want to steal it
      */
-    for ( i=0;
-          i<(1<<order) && (pod>0 || (steal_for_cache && ram > 0));
-          i++)
+    for ( i = 0;
+          i < (1UL << order) && (pod > 0 || (steal_for_cache && ram > 0));
+          i += n )
     {
         mfn_t mfn;
         p2m_type_t t;
         p2m_access_t a;
+        unsigned int cur_order;
 
-        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
+        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
+        if ( order < cur_order )
+            cur_order = order;
+        n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
-                          p2m->default_access);
-            p2m->pod.entry_count--;
+            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+                          p2m_invalid, p2m->default_access);
+            p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
-            pod--;
+            pod -= n;
         }
         else if ( steal_for_cache && p2m_is_ram(t) )
         {
+            /*
+             * If we need less than 1 << cur_order, we may end up stealing
+             * more memory here than we actually need. This will be rectified
+             * below, however; and stealing too much and then freeing what we
+             * need may allow us to free smaller pages from the cache, and
+             * avoid breaking up superpages.
+             */
             struct page_info *page;
+            unsigned int j;
 
             ASSERT(mfn_valid(mfn));
 
             page = mfn_to_page(mfn);
 
-            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
-                          p2m->default_access);
-            set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
-
-            p2m_pod_cache_add(p2m, page, 0);
+            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+                          p2m_invalid, p2m->default_access);
+            for ( j = 0; j < n; ++j )
+                set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+            p2m_pod_cache_add(p2m, page, cur_order);
 
             steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
 
-            nonpod--;
-            ram--;
+            nonpod -= n;
+            ram -= n;
         }
     }    
 
@@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
     p2m_type_t type, type0 = 0;
     unsigned long * map = NULL;
     int ret=0, reset = 0;
-    int i, j;
+    unsigned long i, n;
+    unsigned int j;
     int max_ref = 1;
     struct domain *d = p2m->domain;
 
@@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
 
     /* Look up the mfns, checking to make sure they're the same mfn
      * and aligned, and mapping them. */
-    for ( i=0; i<SUPERPAGE_PAGES; i++ )
+    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
     {
         p2m_access_t a; 
-        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
+        unsigned int cur_order;
+
+        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
+        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
 
         if ( i == 0 )
         {
@@ -1114,7 +1125,7 @@ guest_physmap_mark_populate_on_demand(st
                                       unsigned int order)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    unsigned long i, pod_count = 0;
+    unsigned long i, n, pod_count = 0;
     p2m_type_t ot;
     mfn_t omfn;
     int rc = 0;
@@ -1127,10 +1138,13 @@ guest_physmap_mark_populate_on_demand(st
     P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
 
     /* Make sure all gpfns are unused */
-    for ( i = 0; i < (1UL << order); i++ )
+    for ( i = 0; i < (1UL << order); i += n )
     {
         p2m_access_t a;
-        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
+        unsigned int cur_order;
+
+        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, &cur_order, NULL);
+        n = 1UL << min(order, cur_order);
         if ( p2m_is_ram(ot) )
         {
             P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
@@ -1140,7 +1154,7 @@ guest_physmap_mark_populate_on_demand(st
         else if ( ot == p2m_populate_on_demand )
         {
             /* Count how man PoD entries we'll be replacing if successful */
-            pod_count++;
+            pod_count += n;
         }
     }
 



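A small self-contained sketch of the stride change above, using simplified stand-in names (get_entry(), count_pod()) rather than Xen's real interfaces: a fake p2m maps the whole 2M range with a single order-9 entry, so stepping by min(order, cur_order) needs one lookup where per-4k-page stepping needs 512, while producing the same count.

#include <stdio.h>

#define PAGE_ORDER_2M 9

static unsigned long lookups;

/* Fake p2m: the whole 2M range is one populate-on-demand superpage entry. */
static int get_entry(unsigned long gfn, unsigned int *cur_order)
{
    (void)gfn;
    lookups++;
    *cur_order = PAGE_ORDER_2M;
    return 1;                          /* 1 = "populate on demand" type */
}

static unsigned long count_pod(unsigned int order, int clamp_to_entry_order)
{
    unsigned long i, n, pod = 0;

    for ( i = 0; i < (1UL << order); i += n )
    {
        unsigned int cur_order;
        int t = get_entry(i, &cur_order);

        /* New behaviour: advance by the entry's order, clamped to the
         * range being operated on; old behaviour: one 4k page at a time. */
        n = clamp_to_entry_order
            ? 1UL << (cur_order < order ? cur_order : order)
            : 1;

        if ( t == 1 )
            pod += n;
    }

    return pod;
}

int main(void)
{
    unsigned long pod;

    lookups = 0;
    pod = count_pod(PAGE_ORDER_2M, 0);
    printf("4k stepping:    pod=%lu lookups=%lu\n", pod, lookups);

    lookups = 0;
    pod = count_pod(PAGE_ORDER_2M, 1);
    printf("order stepping: pod=%lu lookups=%lu\n", pod, lookups);

    return 0;
}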


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-28 14:30 [PATCH v2] x86/PoD: shorten certain operations on higher order ranges Jan Beulich
@ 2015-09-29 12:20 ` Andrew Cooper
  2015-09-29 12:57   ` Jan Beulich
  2015-09-29 12:58 ` George Dunlap
  2015-09-29 16:45 ` George Dunlap
  2 siblings, 1 reply; 9+ messages in thread
From: Andrew Cooper @ 2015-09-29 12:20 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Keir Fraser

On 28/09/15 15:30, Jan Beulich wrote:
> Now that p2m->get_entry() always returns a valid order, utilize this
> to accelerate some of the operations in PoD code. (There are two uses
> of p2m->get_entry() left which don't easily lend themselves to this
> optimization.)
>
> Also adjust a few types as needed and remove stale comments from
> p2m_pod_cache_add() (to avoid duplicating them yet another time).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Add a code comment in p2m_pod_decrease_reservation().
>
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -119,20 +119,23 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>  
>      unlock_page_alloc(p2m);
>  
> -    /* Then add the first one to the appropriate populate-on-demand list */
> -    switch(order)
> +    /* Then add to the appropriate populate-on-demand list. */
> +    switch ( order )
>      {
> +    case PAGE_ORDER_1G:
> +        for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
> +            page_list_add_tail(page + i, &p2m->pod.super);
> +        break;
>      case PAGE_ORDER_2M:
> -        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
> -        p2m->pod.count += 1 << order;
> +        page_list_add_tail(page, &p2m->pod.super);
>          break;
>      case PAGE_ORDER_4K:
> -        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
> -        p2m->pod.count += 1;
> +        page_list_add_tail(page, &p2m->pod.single);
>          break;
>      default:
>          BUG();
>      }
> +    p2m->pod.count += 1 << order;

1UL

Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-29 12:20 ` Andrew Cooper
@ 2015-09-29 12:57   ` Jan Beulich
  2015-09-29 13:03     ` Andrew Cooper
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2015-09-29 12:57 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, xen-devel, Keir Fraser

>>> On 29.09.15 at 14:20, <andrew.cooper3@citrix.com> wrote:
> On 28/09/15 15:30, Jan Beulich wrote:
>> --- a/xen/arch/x86/mm/p2m-pod.c
>> +++ b/xen/arch/x86/mm/p2m-pod.c
>> @@ -119,20 +119,23 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>>  
>>      unlock_page_alloc(p2m);
>>  
>> -    /* Then add the first one to the appropriate populate-on-demand list */
>> -    switch(order)
>> +    /* Then add to the appropriate populate-on-demand list. */
>> +    switch ( order )
>>      {
>> +    case PAGE_ORDER_1G:
>> +        for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
>> +            page_list_add_tail(page + i, &p2m->pod.super);
>> +        break;
>>      case PAGE_ORDER_2M:
>> -        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
>> -        p2m->pod.count += 1 << order;
>> +        page_list_add_tail(page, &p2m->pod.super);
>>          break;
>>      case PAGE_ORDER_4K:
>> -        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
>> -        p2m->pod.count += 1;
>> +        page_list_add_tail(page, &p2m->pod.single);
>>          break;
>>      default:
>>          BUG();
>>      }
>> +    p2m->pod.count += 1 << order;
> 
> 1UL

Not really - the field is a "long" one, so at best 1L or 1U. And then
all the valid order values are visible right above, for none of them
it makes a difference, and there are ample similar uses scattered
around the file (yes, bad examples are no excuse, but in cases
where the suffix doesn't really matter I think it is better to omit it).

> Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Let me know regarding this one,
Jan
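
A tiny standalone sketch of the point about the suffix, using stand-in values that assume the usual x86 definitions of the page-order constants (0, 9 and 18): for every order that can reach p2m_pod_cache_add(), 1 << order fits comfortably in an int and is converted to long by the addition, so the choice of suffix does not change the resulting count.

#include <stdio.h>

#define PAGE_ORDER_4K  0
#define PAGE_ORDER_2M  9
#define PAGE_ORDER_1G 18

int main(void)
{
    long count = 0;
    unsigned int orders[] = { PAGE_ORDER_4K, PAGE_ORDER_2M, PAGE_ORDER_1G };

    for ( unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++ )
    {
        count += 1 << orders[i];       /* int shift, converted to long */
        printf("order %2u -> count %ld\n", orders[i], count);
    }

    return 0;
}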


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-28 14:30 [PATCH v2] x86/PoD: shorten certain operations on higher order ranges Jan Beulich
  2015-09-29 12:20 ` Andrew Cooper
@ 2015-09-29 12:58 ` George Dunlap
  2015-09-29 16:45 ` George Dunlap
  2 siblings, 0 replies; 9+ messages in thread
From: George Dunlap @ 2015-09-29 12:58 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Andrew Cooper, Keir Fraser

On 28/09/15 15:30, Jan Beulich wrote:
> Now that p2m->get_entry() always returns a valid order, utilize this
> to accelerate some of the operations in PoD code. (There are two uses
> of p2m->get_entry() left which don't easily lend themselves to this
> optimization.)
> 
> Also adjust a few types as needed and remove stale comments from
> p2m_pod_cache_add() (to avoid duplicating them yet another time).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Add a code comment in p2m_pod_decrease_reservation().
> 
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -119,20 +119,23 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>  
>      unlock_page_alloc(p2m);
>  
> -    /* Then add the first one to the appropriate populate-on-demand list */
> -    switch(order)
> +    /* Then add to the appropriate populate-on-demand list. */
> +    switch ( order )
>      {
> +    case PAGE_ORDER_1G:
> +        for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
> +            page_list_add_tail(page + i, &p2m->pod.super);
> +        break;
>      case PAGE_ORDER_2M:
> -        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
> -        p2m->pod.count += 1 << order;
> +        page_list_add_tail(page, &p2m->pod.super);
>          break;
>      case PAGE_ORDER_4K:
> -        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
> -        p2m->pod.count += 1;
> +        page_list_add_tail(page, &p2m->pod.single);
>          break;
>      default:
>          BUG();
>      }
> +    p2m->pod.count += 1 << order;
>  
>      return 0;
>  }
> @@ -502,11 +505,10 @@ p2m_pod_decrease_reservation(struct doma
>                               unsigned int order)
>  {
>      int ret=0;
> -    int i;
> +    unsigned long i, n;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -
> -    int steal_for_cache;
> -    int pod, nonpod, ram;
> +    bool_t steal_for_cache;
> +    long pod, nonpod, ram;
>  
>      gfn_lock(p2m, gpfn, order);
>      pod_lock(p2m);    
> @@ -525,21 +527,21 @@ recount:
>      /* Figure out if we need to steal some freed memory for our cache */
>      steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
>  
> -    /* FIXME: Add contiguous; query for PSE entries? */
> -    for ( i=0; i<(1<<order); i++)
> +    for ( i = 0; i < (1UL << order); i += n )
>      {
>          p2m_access_t a;
>          p2m_type_t t;
> +        unsigned int cur_order;
>  
> -        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
> -
> +        p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
> +        n = 1UL << min(order, cur_order);
>          if ( t == p2m_populate_on_demand )
> -            pod++;
> +            pod += n;
>          else
>          {
> -            nonpod++;
> +            nonpod += n;
>              if ( p2m_is_ram(t) )
> -                ram++;
> +                ram += n;
>          }
>      }
>  
> @@ -574,41 +576,53 @@ recount:
>       * + There are PoD entries to handle, or
>       * + There is ram left, and we want to steal it
>       */
> -    for ( i=0;
> -          i<(1<<order) && (pod>0 || (steal_for_cache && ram > 0));
> -          i++)
> +    for ( i = 0;
> +          i < (1UL << order) && (pod > 0 || (steal_for_cache && ram > 0));
> +          i += n )
>      {
>          mfn_t mfn;
>          p2m_type_t t;
>          p2m_access_t a;
> +        unsigned int cur_order;
>  
> -        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
> +        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
> +        if ( order < cur_order )
> +            cur_order = order;
> +        n = 1UL << cur_order;
>          if ( t == p2m_populate_on_demand )
>          {
> -            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
> -                          p2m->default_access);
> -            p2m->pod.entry_count--;
> +            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
> +                          p2m_invalid, p2m->default_access);
> +            p2m->pod.entry_count -= n;
>              BUG_ON(p2m->pod.entry_count < 0);
> -            pod--;
> +            pod -= n;
>          }
>          else if ( steal_for_cache && p2m_is_ram(t) )
>          {
> +            /*
> +             * If we need less than 1 << cur_order, we may end up stealing
> +             * more memory here than we actually need. This will be rectified
> +             * below, however; and stealing too much and then freeing what we
> +             * need may allow us to free smaller pages from the cache, and
> +             * avoid breaking up superpages.
> +             */
>              struct page_info *page;
> +            unsigned int j;
>  
>              ASSERT(mfn_valid(mfn));
>  
>              page = mfn_to_page(mfn);
>  
> -            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
> -                          p2m->default_access);
> -            set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
> -
> -            p2m_pod_cache_add(p2m, page, 0);
> +            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
> +                          p2m_invalid, p2m->default_access);
> +            for ( j = 0; j < n; ++j )
> +                set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
> +            p2m_pod_cache_add(p2m, page, cur_order);
>  
>              steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
>  
> -            nonpod--;
> -            ram--;
> +            nonpod -= n;
> +            ram -= n;
>          }
>      }    
>  
> @@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
>      p2m_type_t type, type0 = 0;
>      unsigned long * map = NULL;
>      int ret=0, reset = 0;
> -    int i, j;
> +    unsigned long i, n;
> +    unsigned int j;
>      int max_ref = 1;
>      struct domain *d = p2m->domain;
>  
> @@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
>  
>      /* Look up the mfns, checking to make sure they're the same mfn
>       * and aligned, and mapping them. */
> -    for ( i=0; i<SUPERPAGE_PAGES; i++ )
> +    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
>      {
>          p2m_access_t a; 
> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
> +        unsigned int cur_order;
> +
> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
> +        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);

Again just thinking out loud a bit here -- I wonder how often it will be
the case that we manage to find a contiguous superpage that is not
actually set as a superpage in the p2m.  The only reason this loop is
here in the first place is because when I wrote it there was as yet no
way to get the order of a p2m entry.  I'm now thinking it might be
better to just read the entry and bail if the order is SUPERPAGE_ORDER.

But in any case, this code is correct and doesn't change the end-to-end
functionality AFAICT, so:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-29 12:57   ` Jan Beulich
@ 2015-09-29 13:03     ` Andrew Cooper
  0 siblings, 0 replies; 9+ messages in thread
From: Andrew Cooper @ 2015-09-29 13:03 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, xen-devel, Keir Fraser

On 29/09/15 13:57, Jan Beulich wrote:
>>>> On 29.09.15 at 14:20, <andrew.cooper3@citrix.com> wrote:
>> On 28/09/15 15:30, Jan Beulich wrote:
>>> --- a/xen/arch/x86/mm/p2m-pod.c
>>> +++ b/xen/arch/x86/mm/p2m-pod.c
>>> @@ -119,20 +119,23 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>>>  
>>>      unlock_page_alloc(p2m);
>>>  
>>> -    /* Then add the first one to the appropriate populate-on-demand list */
>>> -    switch(order)
>>> +    /* Then add to the appropriate populate-on-demand list. */
>>> +    switch ( order )
>>>      {
>>> +    case PAGE_ORDER_1G:
>>> +        for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
>>> +            page_list_add_tail(page + i, &p2m->pod.super);
>>> +        break;
>>>      case PAGE_ORDER_2M:
>>> -        page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
>>> -        p2m->pod.count += 1 << order;
>>> +        page_list_add_tail(page, &p2m->pod.super);
>>>          break;
>>>      case PAGE_ORDER_4K:
>>> -        page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
>>> -        p2m->pod.count += 1;
>>> +        page_list_add_tail(page, &p2m->pod.single);
>>>          break;
>>>      default:
>>>          BUG();
>>>      }
>>> +    p2m->pod.count += 1 << order;
>> 1UL
> Not really - the field is a "long" one, so at best 1L or 1U. And then
> all the valid order values are visible right above, for none of them
> it makes a difference, and there are ample similar uses scattered
> around the file (yes, bad examples are no excuse, but in cases
> where the suffix doesn't really matter I think it is better to omit it).
>
>> Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Let me know regrading this one,

For sanity's sake, I would suggest going with 1L as one less place to go
wrong when we gain 512GB superpages.

~Andrew


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-28 14:30 [PATCH v2] x86/PoD: shorten certain operations on higher order ranges Jan Beulich
  2015-09-29 12:20 ` Andrew Cooper
  2015-09-29 12:58 ` George Dunlap
@ 2015-09-29 16:45 ` George Dunlap
  2015-09-30 12:12   ` Jan Beulich
  2 siblings, 1 reply; 9+ messages in thread
From: George Dunlap @ 2015-09-29 16:45 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Tim Deegan, Keir Fraser, Andrew Cooper

On Mon, Sep 28, 2015 at 3:30 PM, Jan Beulich <JBeulich@suse.com> wrote:
> Now that p2m->get_entry() always returns a valid order, utilize this
> to accelerate some of the operations in PoD code. (There are two uses
> of p2m->get_entry() left which don't easily lend themselves to this
> optimization.)
>
> Also adjust a few types as needed and remove stale comments from
> p2m_pod_cache_add() (to avoid duplicating them yet another time).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

So I was just looking at my own suggestion, and I want to withdraw my
"reviewed-by" temporarily to ask a question...

> @@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
>      p2m_type_t type, type0 = 0;
>      unsigned long * map = NULL;
>      int ret=0, reset = 0;
> -    int i, j;
> +    unsigned long i, n;
> +    unsigned int j;
>      int max_ref = 1;
>      struct domain *d = p2m->domain;
>
> @@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
>
>      /* Look up the mfns, checking to make sure they're the same mfn
>       * and aligned, and mapping them. */
> -    for ( i=0; i<SUPERPAGE_PAGES; i++ )
> +    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
>      {
>          p2m_access_t a;
> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
> +        unsigned int cur_order;
> +
> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
> +        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
>
>          if ( i == 0 )
>          {

The check at the bottom of this loop not only verifies that all the
mfns are contiguous; it also checks that each of the individual (4k)
mfns has certain properties; namely:

+ None of the mfns are used as pagetables, or allocated via xenheap
+ None of the mfns are likely to be mapped elsewhere (refcount 2 or
less for shadow, 1 for hap)

This change makes it so that if the p2m entry is 2M or larger, only
the first mfn actually gets checked.  But I don't think we can assume
that just because the first page of a superpage is not used as a
pagetable, or mapped elsewhere, that none of the pages are.  (Please
correct me if I'm wrong here.)

If that's the case, then we would need to run this loop all the way
through, even if cur_order is SUPERPAGE_ORDER.

I suppose we could have two loops, one which checks for superpage
integrity, and another which checks for the necessary properties for
each individual mfn in the superpage.

Thoughts?

 -George
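
A self-contained sketch of the loop structure being discussed, with simplified stand-in names and a made-up page_ok() predicate in place of the real pagetable/refcount tests: the outer loop walks the 2M range at whatever order each entry reports and performs the range-level contiguity/alignment checks, while an inner loop still visits every 4k page for the per-page checks, even when one entry covers the whole range.

#include <stdbool.h>
#include <stdio.h>

#define SUPERPAGE_ORDER 9
#define SUPERPAGE_PAGES (1UL << SUPERPAGE_ORDER)

/* Fake p2m: gfns [0,512) form one 2M entry; gfns [512,1024) are 4k entries. */
static unsigned long get_entry(unsigned long gfn, unsigned int *cur_order)
{
    if ( gfn < SUPERPAGE_PAGES )
    {
        *cur_order = SUPERPAGE_ORDER;
        return 0x1000 + gfn;
    }
    *cur_order = 0;
    return 0x2000 + (gfn - SUPERPAGE_PAGES);
}

/* Stand-in for the per-page tests (not a pagetable, refcount low enough). */
static bool page_ok(unsigned long mfn)
{
    return mfn != 0x2042;              /* pretend this mfn is mapped elsewhere */
}

static bool superpage_candidate(unsigned long gfn)
{
    unsigned long i, n, mfn0 = 0;

    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
    {
        unsigned int cur_order;
        unsigned long j, mfn = get_entry(gfn + i, &cur_order);

        n = 1UL << (cur_order < SUPERPAGE_ORDER ? cur_order : SUPERPAGE_ORDER);

        if ( i == 0 )
        {
            mfn0 = mfn;
            if ( mfn0 & (SUPERPAGE_PAGES - 1) )     /* alignment */
                return false;
        }
        else if ( mfn != mfn0 + i )                 /* contiguity */
            return false;

        /* Per-page properties cannot be derived from the entry alone,
         * so every 4k page is still visited here. */
        for ( j = 0; j < n; j++ )
            if ( !page_ok(mfn + j) )
                return false;
    }

    return true;
}

int main(void)
{
    printf("2M-mapped range: %s\n", superpage_candidate(0) ? "ok" : "reject");
    printf("4k-mapped range: %s\n",
           superpage_candidate(SUPERPAGE_PAGES) ? "ok" : "reject");
    return 0;
}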


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-29 16:45 ` George Dunlap
@ 2015-09-30 12:12   ` Jan Beulich
  2015-09-30 14:23     ` George Dunlap
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2015-09-30 12:12 UTC (permalink / raw)
  To: George Dunlap; +Cc: Andrew Cooper, Tim Deegan, Keir Fraser, xen-devel

>>> On 29.09.15 at 18:45, <George.Dunlap@eu.citrix.com> wrote:
> On Mon, Sep 28, 2015 at 3:30 PM, Jan Beulich <JBeulich@suse.com> wrote:
>> Now that p2m->get_entry() always returns a valid order, utilize this
>> to accelerate some of the operations in PoD code. (There are two uses
>> of p2m->get_entry() left which don't easily lend themselves to this
>> optimization.)
>>
>> Also adjust a few types as needed and remove stale comments from
>> p2m_pod_cache_add() (to avoid duplicating them yet another time).
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> So I was just looking at my own suggestion, and I want to withdraw my
> "reviewed-by" temporarily to ask a question...

Okay, meaning we'd need to revert.

>> @@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
>>      p2m_type_t type, type0 = 0;
>>      unsigned long * map = NULL;
>>      int ret=0, reset = 0;
>> -    int i, j;
>> +    unsigned long i, n;
>> +    unsigned int j;
>>      int max_ref = 1;
>>      struct domain *d = p2m->domain;
>>
>> @@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
>>
>>      /* Look up the mfns, checking to make sure they're the same mfn
>>       * and aligned, and mapping them. */
>> -    for ( i=0; i<SUPERPAGE_PAGES; i++ )
>> +    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
>>      {
>>          p2m_access_t a;
>> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
>> +        unsigned int cur_order;
>> +
>> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
>> +        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
>>
>>          if ( i == 0 )
>>          {
> 
> The check at the bottom of this loop not only checks to see that all
> the mfns are contiguous; they also check that each of the indivual
> (4k) mfns has certain properties; namely:
> 
> + None of the mfns are used as pagetables, or allocated via xenheap
> + None of the mfns are likely to be mapped elsewhere (refcount 2 or
> less for shadow, 1 for hap)
> 
> This change makes it so that if the p2m entry is 2M or larger, only
> the first mfn actually gets checked.  But I don't think we can assume
> that just because the first page of a superpage is not used as a
> pagetable, or mapped elsewhere, that none of the pages are.  (Please
> correct me if I'm wrong here.)
> 
> If that's the case, then we would need to run this loop all the way
> through, even if cur_order is SUPERPAGE_ORDER.
> 
> I suppose we could have two loops, one which checks for superpage
> integrity, and another which checks for the necessary properties for
> each individual mfn in the superpage.
> 
> Thoughts?

You're right - those checks not depending on individual page
attributes in that condition have misled me to assume all are of
that kind. We indeed do need an inner loop dealing with per-page 
properties. I suppose you won't mind cleaning this up a little in the
course of the anyway necessary transformation.

Jan


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-30 12:12   ` Jan Beulich
@ 2015-09-30 14:23     ` George Dunlap
  2015-09-30 15:40       ` Jan Beulich
  0 siblings, 1 reply; 9+ messages in thread
From: George Dunlap @ 2015-09-30 14:23 UTC (permalink / raw)
  To: Jan Beulich, George Dunlap
  Cc: Andrew Cooper, Tim Deegan, Keir Fraser, xen-devel

On 30/09/15 13:12, Jan Beulich wrote:
>>>> On 29.09.15 at 18:45, <George.Dunlap@eu.citrix.com> wrote:
>> On Mon, Sep 28, 2015 at 3:30 PM, Jan Beulich <JBeulich@suse.com> wrote:
>>> Now that p2m->get_entry() always returns a valid order, utilize this
>>> to accelerate some of the operations in PoD code. (There are two uses
>>> of p2m->get_entry() left which don't easily lend themselves to this
>>> optimization.)
>>>
>>> Also adjust a few types as needed and remove stale comments from
>>> p2m_pod_cache_add() (to avoid duplicating them yet another time).
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>> So I was just looking at my own suggestion, and I want to withdraw my
>> "reviewed-by" temporarily to ask a question...
> 
> Okay, meaning we'd need to revert.
> 
>>> @@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
>>>      p2m_type_t type, type0 = 0;
>>>      unsigned long * map = NULL;
>>>      int ret=0, reset = 0;
>>> -    int i, j;
>>> +    unsigned long i, n;
>>> +    unsigned int j;
>>>      int max_ref = 1;
>>>      struct domain *d = p2m->domain;
>>>
>>> @@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
>>>
>>>      /* Look up the mfns, checking to make sure they're the same mfn
>>>       * and aligned, and mapping them. */
>>> -    for ( i=0; i<SUPERPAGE_PAGES; i++ )
>>> +    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
>>>      {
>>>          p2m_access_t a;
>>> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
>>> +        unsigned int cur_order;
>>> +
>>> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
>>> +        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
>>>
>>>          if ( i == 0 )
>>>          {
>>
>> The check at the bottom of this loop not only verifies that all the
>> mfns are contiguous; it also checks that each of the individual (4k)
>> mfns has certain properties; namely:
>>
>> + None of the mfns are used as pagetables, or allocated via xenheap
>> + None of the mfns are likely to be mapped elsewhere (refcount 2 or
>> less for shadow, 1 for hap)
>>
>> This change makes it so that if the p2m entry is 2M or larger, only
>> the first mfn actually gets checked.  But I don't think we can assume
>> that just because the first page of a superpage is not used as a
>> pagetable, or mapped elsewhere, that none of the pages are.  (Please
>> correct me if I'm wrong here.)
>>
>> If that's the case, then we would need to run this loop all the way
>> through, even if cur_order is SUPERPAGE_ORDER.
>>
>> I suppose we could have two loops, one which checks for superpage
>> integrity, and another which checks for the necessary properties for
>> each individual mfn in the superpage.
>>
>> Thoughts?
> 
> You're right - those checks not depending on individual page
> attributes in that condition have misled me to assume all are of
> that kind. We indeed do need an inner loop dealing with per-page 
> properties. I suppose you won't mind cleaning this up a little in the
> course of the anyway necessary transformation.

Yes, I've got a prototype patch I'll probably send out tomorrow.

 -George


* Re: [PATCH v2] x86/PoD: shorten certain operations on higher order ranges
  2015-09-30 14:23     ` George Dunlap
@ 2015-09-30 15:40       ` Jan Beulich
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2015-09-30 15:40 UTC (permalink / raw)
  To: George Dunlap, George Dunlap
  Cc: Andrew Cooper, Tim Deegan, Keir Fraser, xen-devel

>>> On 30.09.15 at 16:23, <george.dunlap@citrix.com> wrote:
> On 30/09/15 13:12, Jan Beulich wrote:
>>>>> On 29.09.15 at 18:45, <George.Dunlap@eu.citrix.com> wrote:
>>> On Mon, Sep 28, 2015 at 3:30 PM, Jan Beulich <JBeulich@suse.com> wrote:
>>>> Now that p2m->get_entry() always returns a valid order, utilize this
>>>> to accelerate some of the operations in PoD code. (There are two uses
>>>> of p2m->get_entry() left which don't easily lend themselves to this
>>>> optimization.)
>>>>
>>>> Also adjust a few types as needed and remove stale comments from
>>>> p2m_pod_cache_add() (to avoid duplicating them yet another time).
>>>>
>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>
>>> So I was just looking at my own suggestion, and I want to withdraw my
>>> "reviewed-by" temporarily to ask a question...
>> 
>> Okay, meaning we'd need to revert.
>> 
>>>> @@ -649,7 +656,8 @@ p2m_pod_zero_check_superpage(struct p2m_
>>>>      p2m_type_t type, type0 = 0;
>>>>      unsigned long * map = NULL;
>>>>      int ret=0, reset = 0;
>>>> -    int i, j;
>>>> +    unsigned long i, n;
>>>> +    unsigned int j;
>>>>      int max_ref = 1;
>>>>      struct domain *d = p2m->domain;
>>>>
>>>> @@ -668,10 +676,13 @@ p2m_pod_zero_check_superpage(struct p2m_
>>>>
>>>>      /* Look up the mfns, checking to make sure they're the same mfn
>>>>       * and aligned, and mapping them. */
>>>> -    for ( i=0; i<SUPERPAGE_PAGES; i++ )
>>>> +    for ( i = 0; i < SUPERPAGE_PAGES; i += n )
>>>>      {
>>>>          p2m_access_t a;
>>>> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
>>>> +        unsigned int cur_order;
>>>> +
>>>> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
>>>> +        n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
>>>>
>>>>          if ( i == 0 )
>>>>          {
>>>
>>> The check at the bottom of this loop not only verifies that all the
>>> mfns are contiguous; it also checks that each of the individual (4k)
>>> mfns has certain properties; namely:
>>>
>>> + None of the mfns are used as pagetables, or allocated via xenheap
>>> + None of the mfns are likely to be mapped elsewhere (refcount 2 or
>>> less for shadow, 1 for hap)
>>>
>>> This change makes it so that if the p2m entry is 2M or larger, only
>>> the first mfn actually gets checked.  But I don't think we can assume
>>> that just because the first page of a superpage is not used as a
>>> pagetable, or mapped elsewhere, that none of the pages are.  (Please
>>> correct me if I'm wrong here.)
>>>
>>> If that's the case, then we would need to run this loop all the way
>>> through, even if cur_order is SUPERPAGE_ORDER.
>>>
>>> I suppose we could have two loops, one which checks for superpage
>>> integrity, and another which checks for the necessary properties for
>>> each individual mfn in the superpage.
>>>
>>> Thoughts?
>> 
>> You're right - those checks not depending on individual page
>> attributes in that condition have misled me to assume all are of
>> that kind. We indeed do need an inner loop dealing with per-page 
>> properties. I suppose you won't mind cleaning this up a little in the
>> course of the anyway necessary transformation.
> 
> Yes, I've got a prototype patch I'll probably send out tomorrow.

Oh, I've got one too (actually the original, now reverted one
suitably fixed up). Just didn't get around to testing it yet.

Jan

