* [PATCH 0/3] x86: XSA-246 / -247 follow-up
@ 2017-12-04 10:49 Jan Beulich
  2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
                   ` (3 more replies)
  0 siblings, 4 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-04 10:49 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

1: PoD: correctly handle non-order-0 decrease-reservation requests
2: mm: drop yet another relic of translated PV domains from new_guest_cr3()
3: p2m: force return value checking of p2m_set_entry()

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-04 10:49 [PATCH 0/3] x86: XSA-246 / -247 follow-up Jan Beulich
@ 2017-12-04 11:06 ` Jan Beulich
  2017-12-04 15:58   ` Andrew Cooper
  2017-12-07 12:56   ` George Dunlap
  2017-12-04 11:06 ` [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3() Jan Beulich
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-04 11:06 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

p2m_pod_decrease_reservation() returning just (not) all-done is not
sufficient for the caller: If some pages were processed,
guest_remove_page() returning an error for those pages is the expected
result rather than an indication of a problem. Make guest_remove_page()
return a distinct error code for this very case, and special case
handling in case of seeing this error code in decrease_reservation().

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -393,10 +393,10 @@ int guest_physmap_mark_populate_on_deman
     return -ENOSYS;
 }
 
-int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
-                                 unsigned int order)
+unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
+                                           unsigned int order)
 {
-    return -ENOSYS;
+    return 0;
 }
 
 static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -510,11 +510,10 @@ p2m_pod_zero_check_superpage(struct p2m_
  * Once both of these functions have been completed, we can return and
  * allow decrease_reservation() to handle everything else.
  */
-int
+unsigned long
 p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 {
-    int ret = 0;
-    unsigned long i, n;
+    unsigned long ret = 0, i, n;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     bool_t steal_for_cache;
     long pod, nonpod, ram;
@@ -577,9 +576,9 @@ p2m_pod_decrease_reservation(struct doma
                 domain_crash(d);
             goto out_unlock;
         }
-        p2m->pod.entry_count -= 1UL << order;
+        ret = 1UL << order;
+        p2m->pod.entry_count -= ret;
         BUG_ON(p2m->pod.entry_count < 0);
-        ret = 1;
         goto out_entry_check;
     }
 
@@ -630,6 +629,7 @@ p2m_pod_decrease_reservation(struct doma
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
             pod -= n;
+            ret += n;
         }
         else if ( steal_for_cache && p2m_is_ram(t) )
         {
@@ -664,16 +664,10 @@ p2m_pod_decrease_reservation(struct doma
 
             nonpod -= n;
             ram -= n;
+            ret += n;
         }
     }
 
-    /*
-     * If there are no more non-PoD entries, tell decrease_reservation() that
-     * there's nothing left to do.
-     */
-    if ( nonpod == 0 )
-        ret = 1;
-
 out_entry_check:
     /* If we've reduced our "liabilities" beyond our "assets", free some */
     if ( p2m->pod.entry_count < p2m->pod.count )
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -284,13 +284,15 @@ int guest_remove_page(struct domain *d,
 
 #ifdef CONFIG_X86
     mfn = get_gfn_query(d, gmfn, &p2mt);
+    if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
+        return -ENOENT;
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
         rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
-        put_gfn(d, gmfn);
-
         if ( rc )
-            return rc;
+            goto out_put_gfn;
+
+        put_gfn(d, gmfn);
 
         /* If the page hasn't yet been paged out, there is an
          * actual page that needs to be released. */
@@ -308,9 +310,7 @@ int guest_remove_page(struct domain *d,
     if ( p2mt == p2m_mmio_direct )
     {
         rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
-        put_gfn(d, gmfn);
-
-        return rc;
+        goto out_put_gfn;
     }
 #else
     mfn = gfn_to_mfn(d, _gfn(gmfn));
@@ -335,10 +335,8 @@ int guest_remove_page(struct domain *d,
         rc = mem_sharing_unshare_page(d, gmfn, 0);
         if ( rc )
         {
-            put_gfn(d, gmfn);
             (void)mem_sharing_notify_enomem(d, gmfn, 0);
-
-            return rc;
+            goto out_put_gfn;
         }
         /* Maybe the mfn changed */
         mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
@@ -375,9 +373,10 @@ int guest_remove_page(struct domain *d,
         put_page(page);
 
     put_page(page);
+ out_put_gfn: __maybe_unused
     put_gfn(d, gmfn);
 
-    return rc;
+    return rc != -ENOENT ? rc : -EINVAL;
 }
 
 static void decrease_reservation(struct memop_args *a)
@@ -392,6 +391,8 @@ static void decrease_reservation(struct
 
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
+        unsigned long pod_done;
+
         if ( i != a->nr_done && hypercall_preempt_check() )
         {
             a->preempted = 1;
@@ -416,14 +417,25 @@ static void decrease_reservation(struct
         }
 
         /* See if populate-on-demand wants to handle this */
-        if ( is_hvm_domain(a->domain)
-             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
-                                             a->extent_order) )
-            continue;
+        pod_done = is_hvm_domain(a->domain) ?
+                   p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
+                                                a->extent_order) : 0;
 
-        for ( j = 0; j < (1 << a->extent_order); j++ )
-            if ( guest_remove_page(a->domain, gmfn + j) )
+        for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ )
+        {
+            switch ( guest_remove_page(a->domain, gmfn + j) )
+            {
+            case 0:
+                break;
+            case -ENOENT:
+                if ( !pod_done )
+                    goto out;
+                --pod_done;
+                break;
+            default:
                 goto out;
+            }
+        }
     }
 
  out:
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -26,9 +26,9 @@ int unmap_mmio_regions(struct domain *d,
 
 /*
  * Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.
+ * Returns the number of pages that were successfully processed.
  */
-int
+unsigned long
 p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
                              unsigned int order);
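
For illustration, a minimal standalone model of the caller-side contract
this introduces; pod_handle() and remove_page() are hypothetical
stand-ins, not the Xen functions:

/*
 * The PoD handler reports how many pages it replaced with "not
 * present"; the per-page removal loop then tolerates exactly that many
 * -ENOENT results and treats any further one as a genuine error.
 */
#include <stdio.h>
#include <errno.h>

#define TOTAL 8UL
#define POD   3UL   /* pretend PoD replaced the first three entries */

static unsigned long pod_handle(void)
{
    return POD;
}

static int remove_page(unsigned long idx)
{
    return idx < POD ? -ENOENT : 0;   /* already-empty entries */
}

int main(void)
{
    unsigned long pod_done = pod_handle(), j;

    for ( j = 0; j + pod_done < TOTAL; j++ )
        switch ( remove_page(j) )
        {
        case 0:
            break;
        case -ENOENT:
            if ( !pod_done )
                return 1;   /* a hole PoD didn't create: real error */
            --pod_done;
            break;
        default:
            return 1;
        }

    printf("all %lu pages accounted for\n", TOTAL);
    return 0;
}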
 




* [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3()
  2017-12-04 10:49 [PATCH 0/3] x86: XSA-246 / -247 follow-up Jan Beulich
  2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
@ 2017-12-04 11:06 ` Jan Beulich
  2017-12-04 15:58   ` Andrew Cooper
  2017-12-04 11:07 ` [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
  2017-12-20  9:25 ` [PATCH v2 0/2] x86: XSA-246 / -247 follow-up Jan Beulich
  3 siblings, 1 reply; 19+ messages in thread
From: Jan Beulich @ 2017-12-04 11:06 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

The function can be called for PV domains only, which commit 5a0b9fba92
("x86/mm: drop further relics of translated PV domains") sort of
realized, but not fully.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2846,9 +2846,7 @@ int new_guest_cr3(mfn_t mfn)
         return 0;
     }
 
-    rc = paging_mode_refcounts(d)
-         ? (get_page_from_mfn(mfn, d) ? 0 : -EINVAL)
-         : get_page_and_type_from_mfn(mfn, PGT_root_page_table, d, 0, 1);
+    rc = get_page_and_type_from_mfn(mfn, PGT_root_page_table, d, 0, 1);
     switch ( rc )
     {
     case 0:
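
For illustration, a reduced model of the reachability argument; all
names below are stand-ins, not the Xen code:

#include <stdbool.h>

struct dom_sketch {
    bool is_pv;
    bool refcounts;   /* never true for a PV domain, by construction */
};

static int typed_ref(void)   /* stand-in for the typed-reference path */
{
    return 0;
}

int new_cr3_sketch(const struct dom_sketch *d)
{
    (void)d;   /* callers are PV-only, so d->refcounts is known false */

    /* was: d->refcounts ? plain_ref() : typed_ref(); */
    return typed_ref();
}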





* [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry()
  2017-12-04 10:49 [PATCH 0/3] x86: XSA-246 / -247 follow-up Jan Beulich
  2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
  2017-12-04 11:06 ` [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3() Jan Beulich
@ 2017-12-04 11:07 ` Jan Beulich
  2017-12-04 16:03   ` Andrew Cooper
  2017-12-05  1:47   ` Tian, Kevin
  2017-12-20  9:25 ` [PATCH v2 0/2] x86: XSA-246 / -247 follow-up Jan Beulich
  3 siblings, 2 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-04 11:07 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima

As XSAs 246 and 247 have shown, not doing so is rather dangerous.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1550,9 +1550,11 @@ void p2m_mem_paging_populate(struct doma
         if ( p2mt == p2m_ram_paging_out )
             req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
+        rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
     }
     gfn_unlock(p2m, gfn, 0);
+    if ( rc < 0 )
+        return;
 
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
@@ -1700,10 +1702,12 @@ void p2m_mem_paging_resume(struct domain
          */
         if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
         {
-            p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                          paging_mode_log_dirty(d) ? p2m_ram_logdirty :
-                          p2m_ram_rw, a);
-            set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
+            int rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                                   paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                                   p2m_ram_rw, a);
+
+            if ( !rc )
+                set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
         }
         gfn_unlock(p2m, gfn, 0);
     }
@@ -2463,9 +2467,9 @@ static void p2m_reset_altp2m(struct p2m_
     p2m->max_remapped_gfn = 0;
 }
 
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma)
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct p2m_domain *p2m;
     p2m_access_t a;
@@ -2474,9 +2478,10 @@ void p2m_altp2m_propagate_change(struct
     unsigned int i;
     unsigned int reset_count = 0;
     unsigned int last_reset_idx = ~0;
+    int ret = 0;
 
     if ( !altp2m_active(d) )
-        return;
+        return 0;
 
     altp2m_list_lock(d);
 
@@ -2515,17 +2520,25 @@ void p2m_altp2m_propagate_change(struct
                     p2m_unlock(p2m);
                 }
 
-                goto out;
+                ret = 0;
+                break;
             }
         }
         else if ( !mfn_eq(m, INVALID_MFN) )
-            p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+        {
+            int rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+
+            /* Best effort: Don't bail on error. */
+            if ( !ret )
+                ret = rc;
+        }
 
         __put_gfn(p2m, gfn_x(gfn));
     }
 
- out:
     altp2m_list_unlock(d);
+
+    return ret;
 }
 
 /*** Audit ***/
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -904,7 +904,11 @@ out:
         ept_free_entry(p2m, &old_entry, target);
 
     if ( entry_written && p2m_is_hostp2m(p2m) )
-        p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+    {
+        ret = p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+        if ( !rc )
+            rc = ret;
+    }
 
     return rc;
 }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -689,8 +689,9 @@ void p2m_free_ptp(struct p2m_domain *p2m
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
-int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
-                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
+int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
+                               unsigned int page_order, p2m_type_t p2mt,
+                               p2m_access_t p2ma);
 
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
@@ -830,9 +831,9 @@ int p2m_change_altp2m_gfn(struct domain
                           gfn_t old_gfn, gfn_t new_gfn);
 
 /* Propagate a host p2m change to all alternate p2m's */
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma);
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma);
 
 /*
  * p2m type to IOMMU flags
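
For illustration, a minimal example of what the annotation buys,
assuming (as in Xen) that __must_check wraps GCC's warn_unused_result
attribute; set_entry() is a stand-in:

#include <errno.h>

#define __must_check __attribute__((__warn_unused_result__))

static __must_check int set_entry(int gfn)
{
    return gfn < 0 ? -EINVAL : 0;
}

int demo(void)
{
    set_entry(-1);        /* compiler warns: ignoring return value */

    return set_entry(1);  /* fine: the result is consumed */
}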




* Re: [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
@ 2017-12-04 15:58   ` Andrew Cooper
  2017-12-05  7:42     ` Jan Beulich
  2017-12-07 12:56   ` George Dunlap
  1 sibling, 1 reply; 19+ messages in thread
From: Andrew Cooper @ 2017-12-04 15:58 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Julien Grall

On 04/12/17 11:06, Jan Beulich wrote:
> p2m_pod_decrease_reservation() returning just (not) all-done is not

This would be easier to parse as "returning only all-done is not"

> sufficient for the caller: If some pages were processed,
> guest_remove_page() returning an error for those pages is the expected
> result rather than an indication of a problem. Make guest_remove_page()
> return a distinct error code for this very case, and special case
> handling in case of seeing this error code in decrease_reservation().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -393,10 +393,10 @@ int guest_physmap_mark_populate_on_deman
>      return -ENOSYS;
>  }
>  
> -int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> -                                 unsigned int order)
> +unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> +                                           unsigned int order)
>  {
> -    return -ENOSYS;
> +    return 0;
>  }
>  
>  static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -510,11 +510,10 @@ p2m_pod_zero_check_superpage(struct p2m_
>   * Once both of these functions have been completed, we can return and
>   * allow decrease_reservation() to handle everything else.
>   */
> -int
> +unsigned long
>  p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>  {
> -    int ret = 0;
> -    unsigned long i, n;
> +    unsigned long ret = 0, i, n;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      bool_t steal_for_cache;
>      long pod, nonpod, ram;
> @@ -577,9 +576,9 @@ p2m_pod_decrease_reservation(struct doma
>                  domain_crash(d);
>              goto out_unlock;
>          }
> -        p2m->pod.entry_count -= 1UL << order;
> +        ret = 1UL << order;
> +        p2m->pod.entry_count -= ret;
>          BUG_ON(p2m->pod.entry_count < 0);
> -        ret = 1;
>          goto out_entry_check;
>      }
>  
> @@ -630,6 +629,7 @@ p2m_pod_decrease_reservation(struct doma
>              p2m->pod.entry_count -= n;
>              BUG_ON(p2m->pod.entry_count < 0);
>              pod -= n;
> +            ret += n;
>          }
>          else if ( steal_for_cache && p2m_is_ram(t) )
>          {
> @@ -664,16 +664,10 @@ p2m_pod_decrease_reservation(struct doma
>  
>              nonpod -= n;
>              ram -= n;
> +            ret += n;
>          }
>      }
>  
> -    /*
> -     * If there are no more non-PoD entries, tell decrease_reservation() that
> -     * there's nothing left to do.
> -     */
> -    if ( nonpod == 0 )
> -        ret = 1;
> -
>  out_entry_check:
>      /* If we've reduced our "liabilities" beyond our "assets", free some */
>      if ( p2m->pod.entry_count < p2m->pod.count )
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -284,13 +284,15 @@ int guest_remove_page(struct domain *d,
>  
>  #ifdef CONFIG_X86
>      mfn = get_gfn_query(d, gmfn, &p2mt);
> +    if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
> +        return -ENOENT;

Newline.

>      if ( unlikely(p2m_is_paging(p2mt)) )
>      {
>          rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);

Somewhere in this callchain, you truncate unsigned long to int.  It is
ok (I think) at the moment because ORDER_1G fits within int, but is
liable to break subtly in the future.
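
(As an aside, a reduced example of the kind of silent narrowing being
flagged; takes_int() is made up for illustration:)

#include <stdio.h>

static void takes_int(int n)
{
    printf("callee sees %d\n", n);
}

int main(void)
{
    unsigned long big = (1UL << 33) | 1;   /* doesn't fit in 32-bit int */

    takes_int(big);   /* only the low 32 bits survive on LP64 */
    return 0;
}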

> -        put_gfn(d, gmfn);
> -
>          if ( rc )
> -            return rc;
> +            goto out_put_gfn;
> +
> +        put_gfn(d, gmfn);
>  
>          /* If the page hasn't yet been paged out, there is an
>           * actual page that needs to be released. */
> @@ -308,9 +310,7 @@ int guest_remove_page(struct domain *d,
>      if ( p2mt == p2m_mmio_direct )
>      {
>          rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
> -        put_gfn(d, gmfn);
> -
> -        return rc;
> +        goto out_put_gfn;
>      }
>  #else
>      mfn = gfn_to_mfn(d, _gfn(gmfn));
> @@ -335,10 +335,8 @@ int guest_remove_page(struct domain *d,
>          rc = mem_sharing_unshare_page(d, gmfn, 0);
>          if ( rc )
>          {
> -            put_gfn(d, gmfn);
>              (void)mem_sharing_notify_enomem(d, gmfn, 0);
> -
> -            return rc;
> +            goto out_put_gfn;
>          }
>          /* Maybe the mfn changed */
>          mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
> @@ -375,9 +373,10 @@ int guest_remove_page(struct domain *d,
>          put_page(page);
>  
>      put_page(page);
> + out_put_gfn: __maybe_unused

What is this annotation for?

~Andrew

>      put_gfn(d, gmfn);
>  
> -    return rc;
> +    return rc != -ENOENT ? rc : -EINVAL;
>  }
>  
>  static void decrease_reservation(struct memop_args *a)
> @@ -392,6 +391,8 @@ static void decrease_reservation(struct
>  
>      for ( i = a->nr_done; i < a->nr_extents; i++ )
>      {
> +        unsigned long pod_done;
> +
>          if ( i != a->nr_done && hypercall_preempt_check() )
>          {
>              a->preempted = 1;
> @@ -416,14 +417,25 @@ static void decrease_reservation(struct
>          }
>  
>          /* See if populate-on-demand wants to handle this */
> -        if ( is_hvm_domain(a->domain)
> -             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
> -                                             a->extent_order) )
> -            continue;
> +        pod_done = is_hvm_domain(a->domain) ?
> +                   p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
> +                                                a->extent_order) : 0;
>  
> -        for ( j = 0; j < (1 << a->extent_order); j++ )
> -            if ( guest_remove_page(a->domain, gmfn + j) )
> +        for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ )
> +        {
> +            switch ( guest_remove_page(a->domain, gmfn + j) )
> +            {
> +            case 0:
> +                break;
> +            case -ENOENT:
> +                if ( !pod_done )
> +                    goto out;
> +                --pod_done;
> +                break;
> +            default:
>                  goto out;
> +            }
> +        }
>      }
>  
>   out:
> --- a/xen/include/xen/p2m-common.h
> +++ b/xen/include/xen/p2m-common.h
> @@ -26,9 +26,9 @@ int unmap_mmio_regions(struct domain *d,
>  
>  /*
>   * Call when decreasing memory reservation to handle PoD entries properly.
> - * Will return '1' if all entries were handled and nothing more need be done.
> + * Returns the number of pages that were successfully processed.
>   */
> -int
> +unsigned long
>  p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
>                               unsigned int order);
>  
>
>



* Re: [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3()
  2017-12-04 11:06 ` [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3() Jan Beulich
@ 2017-12-04 15:58   ` Andrew Cooper
  0 siblings, 0 replies; 19+ messages in thread
From: Andrew Cooper @ 2017-12-04 15:58 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap

On 04/12/17 11:06, Jan Beulich wrote:
> The function can be called for PV domains only, which commit 5a0b9fba92
> ("x86/mm: drop further relics of translated PV domains") sort of
> realized, but not fully.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry()
  2017-12-04 11:07 ` [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
@ 2017-12-04 16:03   ` Andrew Cooper
  2017-12-05  1:47   ` Tian, Kevin
  1 sibling, 0 replies; 19+ messages in thread
From: Andrew Cooper @ 2017-12-04 16:03 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Kevin Tian, Jun Nakajima

On 04/12/17 11:07, Jan Beulich wrote:
> As XSAs 246 and 247 have shown, not doing so is rather dangerous.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry()
  2017-12-04 11:07 ` [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
  2017-12-04 16:03   ` Andrew Cooper
@ 2017-12-05  1:47   ` Tian, Kevin
  1 sibling, 0 replies; 19+ messages in thread
From: Tian, Kevin @ 2017-12-05  1:47 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Andrew Cooper, Nakajima, Jun

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Monday, December 4, 2017 7:07 PM
> 
> As XSAs 246 and 247 have shown, not doing so is rather dangerous.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Kevin Tian <kevin.tian@intel.com>


* Re: [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-04 15:58   ` Andrew Cooper
@ 2017-12-05  7:42     ` Jan Beulich
  0 siblings, 0 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-05  7:42 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Julien Grall, xen-devel

>>> On 04.12.17 at 16:58, <andrew.cooper3@citrix.com> wrote:
> On 04/12/17 11:06, Jan Beulich wrote:
>> --- a/xen/common/memory.c
>> +++ b/xen/common/memory.c
>> @@ -284,13 +284,15 @@ int guest_remove_page(struct domain *d,
>>  
>>  #ifdef CONFIG_X86
>>      mfn = get_gfn_query(d, gmfn, &p2mt);
>> +    if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
>> +        return -ENOENT;
> 
> Newline.
> 
>>      if ( unlikely(p2m_is_paging(p2mt)) )
>>      {
>>          rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
> 
> Somewhere in this callchain, you truncate unsigned long to int.  It is
> ok (I think) at the moment because ORDER_1G fits within int, but is
> liable to break subtly in the future.

I don't understand this: I can't seem to see gfn or mfn to be truncated
(and that would be a pre-existing problem then afaict). And passing
order values as unsigned long is plain pointless - even with 64-bit
frame numbers this can't exceed 64, i.e. is limited to a 6-bit value in
practice. Please clarify what truncation you're suspecting.
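
(The bound being argued, as a compile-time check -- C11, with a made-up
constant name:)

#include <assert.h>
#include <limits.h>

enum { MAX_PAGE_ORDER = 64 };   /* bounded by the bits in a frame number */

static_assert(MAX_PAGE_ORDER <= INT_MAX,
              "an order value always fits in an int");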

>> @@ -375,9 +373,10 @@ int guest_remove_page(struct domain *d,
>>          put_page(page);
>>  
>>      put_page(page);
>> + out_put_gfn: __maybe_unused
> 
> What is this annotation for?

For the !CONFIG_X86 case.
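
A reduced standalone example of the pattern, assuming Xen's
__maybe_unused wraps GCC's unused attribute (which may be applied to
labels):

#define __maybe_unused __attribute__((__unused__))

int demo(int x)
{
    int rc = 0;

#ifdef CONFIG_X86
    if ( x < 0 )
    {
        rc = -1;
        goto out;
    }
#else
    rc = x;   /* no goto -- the label would otherwise be unused */
#endif

 out: __maybe_unused
    return rc;
}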

Jan



* Re: [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
  2017-12-04 15:58   ` Andrew Cooper
@ 2017-12-07 12:56   ` George Dunlap
  2017-12-07 13:07     ` Jan Beulich
  1 sibling, 1 reply; 19+ messages in thread
From: George Dunlap @ 2017-12-07 12:56 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

On 12/04/2017 11:06 AM, Jan Beulich wrote:
> p2m_pod_decrease_reservation() returning just (not) all-done is not
> sufficient for the caller: If some pages were processed,
> guest_remove_page() returning an error for those pages is the expected
> result rather than an indication of a problem. Make guest_remove_page()
> return a distinct error code for this very case, and special case
> handling in case of seeing this error code in decrease_reservation().

The solution is good, but I think it needs more comments and a better
explanation.

How about:

---
p2m_pod_decrease_reservation() at the moment only returns a boolean
value: true for "nothing more to do", false for "something more to do".
If it returns false, decrease_reservation() will loop over the entire
range, calling guest_remove_page() for each page.

Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
partially, some of the memory in the range will be not-present; at which
point guest_remove_page() will return an error, and the entire operation
will fail.

Fix this by:
1. Having p2m_pod_decrease_reservation() return exactly the number of
gpfn pages it has handled (i.e., replaced with 'not present')
2. Making guest_remove_page() return -ENOENT in the case that the gpfn
in question was already empty (and in no other cases)
3. When looping over guest_remove_page(), expect the number of -ENOENT
failures to be exactly equal to the number of pages
p2m_pod_decrease_reservation() removed.
---

> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -393,10 +393,10 @@ int guest_physmap_mark_populate_on_deman
>      return -ENOSYS;
>  }
>  
> -int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> -                                 unsigned int order)
> +unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> +                                           unsigned int order)
>  {
> -    return -ENOSYS;
> +    return 0;
>  }
>  
>  static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -510,11 +510,10 @@ p2m_pod_zero_check_superpage(struct p2m_
>   * Once both of these functions have been completed, we can return and
>   * allow decrease_reservation() to handle everything else.
>   */
> -int
> +unsigned long
>  p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>  {
> -    int ret = 0;
> -    unsigned long i, n;
> +    unsigned long ret = 0, i, n;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      bool_t steal_for_cache;
>      long pod, nonpod, ram;
> @@ -577,9 +576,9 @@ p2m_pod_decrease_reservation(struct doma
>                  domain_crash(d);
>              goto out_unlock;
>          }
> -        p2m->pod.entry_count -= 1UL << order;
> +        ret = 1UL << order;
> +        p2m->pod.entry_count -= ret;
>          BUG_ON(p2m->pod.entry_count < 0);
> -        ret = 1;
>          goto out_entry_check;
>      }
>  
> @@ -630,6 +629,7 @@ p2m_pod_decrease_reservation(struct doma
>              p2m->pod.entry_count -= n;
>              BUG_ON(p2m->pod.entry_count < 0);
>              pod -= n;
> +            ret += n;
>          }
>          else if ( steal_for_cache && p2m_is_ram(t) )
>          {
> @@ -664,16 +664,10 @@ p2m_pod_decrease_reservation(struct doma
>  
>              nonpod -= n;
>              ram -= n;
> +            ret += n;
>          }
>      }
>  
> -    /*
> -     * If there are no more non-PoD entries, tell decrease_reservation() that
> -     * there's nothing left to do.
> -     */
> -    if ( nonpod == 0 )
> -        ret = 1;
> -
>  out_entry_check:
>      /* If we've reduced our "liabilities" beyond our "assets", free some */
>      if ( p2m->pod.entry_count < p2m->pod.count )
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -284,13 +284,15 @@ int guest_remove_page(struct domain *d,
>  
>  #ifdef CONFIG_X86
>      mfn = get_gfn_query(d, gmfn, &p2mt);
> +    if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
> +        return -ENOENT;
>      if ( unlikely(p2m_is_paging(p2mt)) )
>      {
>          rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
> -        put_gfn(d, gmfn);
> -
>          if ( rc )
> -            return rc;
> +            goto out_put_gfn;
> +
> +        put_gfn(d, gmfn);
>  
>          /* If the page hasn't yet been paged out, there is an
>           * actual page that needs to be released. */
> @@ -308,9 +310,7 @@ int guest_remove_page(struct domain *d,
>      if ( p2mt == p2m_mmio_direct )
>      {
>          rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
> -        put_gfn(d, gmfn);
> -
> -        return rc;
> +        goto out_put_gfn;
>      }
>  #else
>      mfn = gfn_to_mfn(d, _gfn(gmfn));
> @@ -335,10 +335,8 @@ int guest_remove_page(struct domain *d,
>          rc = mem_sharing_unshare_page(d, gmfn, 0);
>          if ( rc )
>          {
> -            put_gfn(d, gmfn);
>              (void)mem_sharing_notify_enomem(d, gmfn, 0);
> -
> -            return rc;
> +            goto out_put_gfn;
>          }
>          /* Maybe the mfn changed */
>          mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
> @@ -375,9 +373,10 @@ int guest_remove_page(struct domain *d,
>          put_page(page);
>  
>      put_page(page);
> + out_put_gfn: __maybe_unused
>      put_gfn(d, gmfn);
>  
> -    return rc;
> +    return rc != -ENOENT ? rc : -EINVAL;

/*
 * Filter out -ENOENT return values that aren't a result of an
 * empty p2m entry
 */

>  }
>  
>  static void decrease_reservation(struct memop_args *a)
> @@ -392,6 +391,8 @@ static void decrease_reservation(struct
>  
>      for ( i = a->nr_done; i < a->nr_extents; i++ )
>      {
> +        unsigned long pod_done;
> +
>          if ( i != a->nr_done && hypercall_preempt_check() )
>          {
>              a->preempted = 1;
> @@ -416,14 +417,25 @@ static void decrease_reservation(struct
>          }
>  
>          /* See if populate-on-demand wants to handle this */
> -        if ( is_hvm_domain(a->domain)
> -             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
> -                                             a->extent_order) )
> -            continue;
> +        pod_done = is_hvm_domain(a->domain) ?
> +                   p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
> +                                                a->extent_order) : 0;
>  
> -        for ( j = 0; j < (1 << a->extent_order); j++ )
> -            if ( guest_remove_page(a->domain, gmfn + j) )

/*
 * Look for pages not handled by p2m_pod_decrease_reservation().
 *
 * guest_remove_page() will return -ENOENT for pages which have already
 * been removed by p2m_pod_decrease_reservation(); so expect to see
 * exactly pod_done failures.  Any more means that there were invalid
 * entries before p2m_pod_decrease_reservation() was called.
 */

> +        for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ )
> +        {
> +            switch ( guest_remove_page(a->domain, gmfn + j) )
> +            {
> +            case 0:
> +                break;
> +            case -ENOENT:
> +                if ( !pod_done )
> +                    goto out;
> +                --pod_done;
> +                break;
> +            default:
>                  goto out;
> +            }
> +        }
>      }

What about:

ASSERT(pod_done == 0);

Thanks,
 -George


* Re: [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-07 12:56   ` George Dunlap
@ 2017-12-07 13:07     ` Jan Beulich
  0 siblings, 0 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-07 13:07 UTC (permalink / raw)
  To: George Dunlap
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall, xen-devel

>>> On 07.12.17 at 13:56, <george.dunlap@citrix.com> wrote:
> On 12/04/2017 11:06 AM, Jan Beulich wrote:
>> p2m_pod_decrease_reservation() returning just (not) all-done is not
>> sufficient for the caller: If some pages were processed,
>> guest_remove_page() returning an error for those pages is the expected
>> result rather than an indication of a problem. Make guest_remove_page()
>> return a distinct error code for this very case, and special case
>> handling in case of seeing this error code in decrease_reservation().
> 
> The solution is good, but I think it needs more comments and a better
> explanation.

Your suggestions in this regard all sound good; I'll integrate them,
and unless you tell me otherwise I'll then also add your S-o-b.

>> +        for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ )
>> +        {
>> +            switch ( guest_remove_page(a->domain, gmfn + j) )
>> +            {
>> +            case 0:
>> +                break;
>> +            case -ENOENT:
>> +                if ( !pod_done )
>> +                    goto out;
>> +                --pod_done;
>> +                break;
>> +            default:
>>                  goto out;
>> +            }
>> +        }
>>      }
> 
> What about:
> 
> ASSERT(pod_done == 0);

No, there's nothing preventing another vCPU of the guest doing
something that could result in triggering this assertion. If anything
I could make pod_done being non-zero after the loop a failure,
too. But part of me thinks this is too harsh ...

Jan



* [PATCH v2 0/2] x86: XSA-246 / -247 follow-up
  2017-12-04 10:49 [PATCH 0/3] x86: XSA-246 / -247 follow-up Jan Beulich
                   ` (2 preceding siblings ...)
  2017-12-04 11:07 ` [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
@ 2017-12-20  9:25 ` Jan Beulich
  2017-12-20  9:34   ` [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
  2017-12-20  9:35   ` [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
  3 siblings, 2 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-20  9:25 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

1: PoD: correctly handle non-order-0 decrease-reservation requests
2: p2m: force return value checking of p2m_set_entry()

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-20  9:25 ` [PATCH v2 0/2] x86: XSA-246 / -247 follow-up Jan Beulich
@ 2017-12-20  9:34   ` Jan Beulich
  2018-01-18 15:59     ` Ping: " Jan Beulich
  2018-01-19 16:04     ` George Dunlap
  2017-12-20  9:35   ` [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
  1 sibling, 2 replies; 19+ messages in thread
From: Jan Beulich @ 2017-12-20  9:34 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

p2m_pod_decrease_reservation() at the moment only returns a boolean
value: true for "nothing more to do", false for "something more to do".
If it returns false, decrease_reservation() will loop over the entire
range, calling guest_remove_page() for each page.

Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
partially, some of the memory in the range will be not-present; at which
point guest_remove_page() will return an error, and the entire operation
will fail.

Fix this by:
1. Having p2m_pod_decrease_reservation() return exactly the number of
   gpfn pages it has handled (i.e., replaced with 'not present').
2. Making guest_remove_page() return -ENOENT in the case that the gpfn
   in question was already empty (and in no other cases).
3. When looping over guest_remove_page(), expect the number of -ENOENT
   failures to be no larger than the number of pages
   p2m_pod_decrease_reservation() removed.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
v2: Re-written description (by George). Add comments (as suggested
    by George). Formatting.

--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -388,10 +388,10 @@ int guest_physmap_mark_populate_on_deman
     return -ENOSYS;
 }
 
-int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
-                                 unsigned int order)
+unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
+                                           unsigned int order)
 {
-    return -ENOSYS;
+    return 0;
 }
 
 static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -510,11 +510,10 @@ p2m_pod_zero_check_superpage(struct p2m_
  * Once both of these functions have been completed, we can return and
  * allow decrease_reservation() to handle everything else.
  */
-int
+unsigned long
 p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 {
-    int ret = 0;
-    unsigned long i, n;
+    unsigned long ret = 0, i, n;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     bool_t steal_for_cache;
     long pod, nonpod, ram;
@@ -577,9 +576,9 @@ p2m_pod_decrease_reservation(struct doma
                 domain_crash(d);
             goto out_unlock;
         }
-        p2m->pod.entry_count -= 1UL << order;
+        ret = 1UL << order;
+        p2m->pod.entry_count -= ret;
         BUG_ON(p2m->pod.entry_count < 0);
-        ret = 1;
         goto out_entry_check;
     }
 
@@ -630,6 +629,7 @@ p2m_pod_decrease_reservation(struct doma
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
             pod -= n;
+            ret += n;
         }
         else if ( steal_for_cache && p2m_is_ram(t) )
         {
@@ -664,16 +664,10 @@ p2m_pod_decrease_reservation(struct doma
 
             nonpod -= n;
             ram -= n;
+            ret += n;
         }
     }
 
-    /*
-     * If there are no more non-PoD entries, tell decrease_reservation() that
-     * there's nothing left to do.
-     */
-    if ( nonpod == 0 )
-        ret = 1;
-
 out_entry_check:
     /* If we've reduced our "liabilities" beyond our "assets", free some */
     if ( p2m->pod.entry_count < p2m->pod.count )
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -284,13 +284,16 @@ int guest_remove_page(struct domain *d,
 
 #ifdef CONFIG_X86
     mfn = get_gfn_query(d, gmfn, &p2mt);
+    if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
+        return -ENOENT;
+
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
         rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
-        put_gfn(d, gmfn);
-
         if ( rc )
-            return rc;
+            goto out_put_gfn;
+
+        put_gfn(d, gmfn);
 
         /* If the page hasn't yet been paged out, there is an
          * actual page that needs to be released. */
@@ -308,9 +311,7 @@ int guest_remove_page(struct domain *d,
     if ( p2mt == p2m_mmio_direct )
     {
         rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
-        put_gfn(d, gmfn);
-
-        return rc;
+        goto out_put_gfn;
     }
 #else
     mfn = gfn_to_mfn(d, _gfn(gmfn));
@@ -335,10 +336,8 @@ int guest_remove_page(struct domain *d,
         rc = mem_sharing_unshare_page(d, gmfn, 0);
         if ( rc )
         {
-            put_gfn(d, gmfn);
             (void)mem_sharing_notify_enomem(d, gmfn, 0);
-
-            return rc;
+            goto out_put_gfn;
         }
         /* Maybe the mfn changed */
         mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
@@ -375,9 +374,14 @@ int guest_remove_page(struct domain *d,
         put_page(page);
 
     put_page(page);
+ out_put_gfn: __maybe_unused
     put_gfn(d, gmfn);
 
-    return rc;
+    /*
+     * Filter out -ENOENT return values that aren't a result of an empty p2m
+     * entry.
+     */
+    return rc != -ENOENT ? rc : -EINVAL;
 }
 
 static void decrease_reservation(struct memop_args *a)
@@ -392,6 +396,8 @@ static void decrease_reservation(struct
 
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
+        unsigned long pod_done;
+
         if ( i != a->nr_done && hypercall_preempt_check() )
         {
             a->preempted = 1;
@@ -416,14 +422,33 @@ static void decrease_reservation(struct
         }
 
         /* See if populate-on-demand wants to handle this */
-        if ( is_hvm_domain(a->domain)
-             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
-                                             a->extent_order) )
-            continue;
+        pod_done = is_hvm_domain(a->domain) ?
+                   p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
+                                                a->extent_order) : 0;
 
-        for ( j = 0; j < (1 << a->extent_order); j++ )
-            if ( guest_remove_page(a->domain, gmfn + j) )
+        /*
+         * Look for pages not handled by p2m_pod_decrease_reservation().
+         *
+         * guest_remove_page() will return -ENOENT for pages which have already
+         * been removed by p2m_pod_decrease_reservation(); so expect to see
+         * exactly pod_done failures.  Any more means that there were invalid
+         * entries before p2m_pod_decrease_reservation() was called.
+         */
+        for ( j = 0; j + pod_done < (1UL << a->extent_order); j++ )
+        {
+            switch ( guest_remove_page(a->domain, gmfn + j) )
+            {
+            case 0:
+                break;
+            case -ENOENT:
+                if ( !pod_done )
+                    goto out;
+                --pod_done;
+                break;
+            default:
                 goto out;
+            }
+        }
     }
 
  out:
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -26,9 +26,9 @@ int unmap_mmio_regions(struct domain *d,
 
 /*
  * Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.
+ * Returns the number of pages that were successfully processed.
  */
-int
+unsigned long
 p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
                              unsigned int order);
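
For illustration, the error-code discipline this establishes, reduced
to a standalone sketch with stand-in types and helpers:

#include <errno.h>

typedef enum { p2mt_invalid, p2mt_mmio_dm, p2mt_ram } p2mt_sketch;

static p2mt_sketch lookup_type(unsigned long gfn)
{
    return gfn ? p2mt_ram : p2mt_invalid;   /* stand-in lookup */
}

static int do_remove(unsigned long gfn)
{
    (void)gfn;
    return 0;                               /* stand-in removal */
}

int remove_sketch(unsigned long gfn)
{
    p2mt_sketch t = lookup_type(gfn);
    int rc;

    if ( t == p2mt_invalid || t == p2mt_mmio_dm )
        return -ENOENT;   /* the one sanctioned meaning: entry empty */

    rc = do_remove(gfn);

    /*
     * Filter -ENOENT from deeper layers so the caller can't mistake it
     * for an empty entry when doing its PoD accounting.
     */
    return rc != -ENOENT ? rc : -EINVAL;
}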
 




* [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry()
  2017-12-20  9:25 ` [PATCH v2 0/2] x86: XSA-246 / -247 follow-up Jan Beulich
  2017-12-20  9:34   ` [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
@ 2017-12-20  9:35   ` Jan Beulich
  2018-01-19 17:09     ` George Dunlap
  1 sibling, 1 reply; 19+ messages in thread
From: Jan Beulich @ 2017-12-20  9:35 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima

As XSAs 246 and 247 have shown, not doing so is rather dangerous.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1550,9 +1550,11 @@ void p2m_mem_paging_populate(struct doma
         if ( p2mt == p2m_ram_paging_out )
             req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
+        rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
     }
     gfn_unlock(p2m, gfn, 0);
+    if ( rc < 0 )
+        return;
 
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
@@ -1700,10 +1702,12 @@ void p2m_mem_paging_resume(struct domain
          */
         if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
         {
-            p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                          paging_mode_log_dirty(d) ? p2m_ram_logdirty :
-                          p2m_ram_rw, a);
-            set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
+            int rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                                   paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                                   p2m_ram_rw, a);
+
+            if ( !rc )
+                set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
         }
         gfn_unlock(p2m, gfn, 0);
     }
@@ -2463,9 +2467,9 @@ static void p2m_reset_altp2m(struct p2m_
     p2m->max_remapped_gfn = 0;
 }
 
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma)
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct p2m_domain *p2m;
     p2m_access_t a;
@@ -2474,9 +2478,10 @@ void p2m_altp2m_propagate_change(struct
     unsigned int i;
     unsigned int reset_count = 0;
     unsigned int last_reset_idx = ~0;
+    int ret = 0;
 
     if ( !altp2m_active(d) )
-        return;
+        return 0;
 
     altp2m_list_lock(d);
 
@@ -2515,17 +2520,25 @@ void p2m_altp2m_propagate_change(struct
                     p2m_unlock(p2m);
                 }
 
-                goto out;
+                ret = 0;
+                break;
             }
         }
         else if ( !mfn_eq(m, INVALID_MFN) )
-            p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+        {
+            int rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+
+            /* Best effort: Don't bail on error. */
+            if ( !ret )
+                ret = rc;
+        }
 
         __put_gfn(p2m, gfn_x(gfn));
     }
 
- out:
     altp2m_list_unlock(d);
+
+    return ret;
 }
 
 /*** Audit ***/
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -904,7 +904,11 @@ out:
         ept_free_entry(p2m, &old_entry, target);
 
     if ( entry_written && p2m_is_hostp2m(p2m) )
-        p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+    {
+        ret = p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+        if ( !rc )
+            rc = ret;
+    }
 
     return rc;
 }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -689,8 +689,9 @@ void p2m_free_ptp(struct p2m_domain *p2m
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
-int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
-                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
+int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
+                               unsigned int page_order, p2m_type_t p2mt,
+                               p2m_access_t p2ma);
 
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
@@ -830,9 +831,9 @@ int p2m_change_altp2m_gfn(struct domain
                           gfn_t old_gfn, gfn_t new_gfn);
 
 /* Propagate a host p2m change to all alternate p2m's */
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma);
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma);
 
 /*
  * p2m type to IOMMU flags
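
The altp2m change above follows a "best effort, remember the first
failure" pattern; in isolation it looks like this, with try_one() as a
stand-in for the per-p2m update:

#include <errno.h>

static int try_one(unsigned int i)
{
    return i == 2 ? -EIO : 0;   /* pretend one iteration fails */
}

int propagate_sketch(unsigned int n)
{
    unsigned int i;
    int ret = 0;

    for ( i = 0; i < n; i++ )
    {
        int rc = try_one(i);

        /* Best effort: don't bail on error, but report the first one. */
        if ( !ret )
            ret = rc;
    }

    return ret;
}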




* Ping: [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-20  9:34   ` [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
@ 2018-01-18 15:59     ` Jan Beulich
  2018-01-18 16:36       ` Julien Grall
  2018-01-19 16:04     ` George Dunlap
  1 sibling, 1 reply; 19+ messages in thread
From: Jan Beulich @ 2018-01-18 15:59 UTC (permalink / raw)
  To: Julien Grall, Stefano Stabellini
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Tim Deegan, xen-devel,
	Ian Jackson

>>> On 20.12.17 at 10:34, <JBeulich@suse.com> wrote:
> p2m_pod_decrease_reservation() at the moment only returns a boolean
> value: true for "nothing more to do", false for "something more to do".
> If it returns false, decrease_reservation() will loop over the entire
> range, calling guest_remove_page() for each page.
> 
> Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
> partially, some of the memory in the range will be not-present; at which
> point guest_remove_page() will return an error, and the entire operation
> will fail.
> 
> Fix this by:
> 1. Having p2m_pod_decrease_reservation() return exactly the number of
>    gpfn pages it has handled (i.e., replaced with 'not present').
> 2. Making guest_remove_page() return -ENOENT in the case that the gpfn
>    in question was already empty (and in no other cases).
> 3. When looping over guest_remove_page(), expect the number of -ENOENT
>    failures to be no larger than the number of pages
>    p2m_pod_decrease_reservation() removed.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> ---
> v2: Re-written description (by George). Add comments (as suggested
>     by George). Formatting.
> 
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -388,10 +388,10 @@ int guest_physmap_mark_populate_on_deman
>      return -ENOSYS;
>  }
>  
> -int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> -                                 unsigned int order)
> +unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> +                                           unsigned int order)
>  {
> -    return -ENOSYS;
> +    return 0;
>  }
>  
>  static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)

Stefano, Julien?

Jan



* Re: Ping: [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2018-01-18 15:59     ` Ping: " Jan Beulich
@ 2018-01-18 16:36       ` Julien Grall
  0 siblings, 0 replies; 19+ messages in thread
From: Julien Grall @ 2018-01-18 16:36 UTC (permalink / raw)
  To: Jan Beulich, Julien Grall, Stefano Stabellini
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	xen-devel

Hi Jan,

On 18/01/18 15:59, Jan Beulich wrote:
>>>> On 20.12.17 at 10:34, <JBeulich@suse.com> wrote:
>> p2m_pod_decrease_reservation() at the moment only returns a boolean
>> value: true for "nothing more to do", false for "something more to do".
>> If it returns false, decrease_reservation() will loop over the entire
>> range, calling guest_remove_page() for each page.
>>
>> Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
>> partially, some of the memory in the range will be not-present; at which
>> point guest_remove_page() will return an error, and the entire operation
>> will fail.
>>
>> Fix this by:
>> 1. Having p2m_pod_decrease_reservation() return exactly the number of
>>     gpfn pages it has handled (i.e., replaced with 'not present').
>> 2. Making guest_remove_page() return -ENOENT in the case that the gpfn
>>     in question was already empty (and in no other cases).
>> 3. When looping over guest_remove_page(), expect the number of -ENOENT
>>     failures to be no larger than the number of pages
>>     p2m_pod_decrease_reservation() removed.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
>> ---
>> v2: Re-written description (by George). Add comments (as suggested
>>      by George). Formatting.
>>
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -388,10 +388,10 @@ int guest_physmap_mark_populate_on_deman
>>       return -ENOSYS;
>>   }
>>   
>> -int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
>> -                                 unsigned int order)
>> +unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
>> +                                           unsigned int order)
>>   {
>> -    return -ENOSYS;
>> +    return 0;
>>   }
>>   
>>   static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
> 
> Stefano, Julien?

Sorry, it fell through the cracks.

Acked-by: Julien Grall <julien.grall@linaro.org>

Cheers,

-- 
Julien Grall


* Re: [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2017-12-20  9:34   ` [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
  2018-01-18 15:59     ` Ping: " Jan Beulich
@ 2018-01-19 16:04     ` George Dunlap
  2018-01-19 16:13       ` Jan Beulich
  1 sibling, 1 reply; 19+ messages in thread
From: George Dunlap @ 2018-01-19 16:04 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

On 12/20/2017 09:34 AM, Jan Beulich wrote:
> p2m_pod_decrease_reservation() at the moment only returns a boolean
> value: true for "nothing more to do", false for "something more to do".
> If it returns false, decrease_reservation() will loop over the entire
> range, calling guest_remove_page() for each page.
> 
> Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
> partially, some of the memory in the range will be not-present; at which
> point guest_remove_page() will return an error, and the entire operation
> will fail.
> 
> Fix this by:
> 1. Having p2m_pod_decrease_reservation() return exactly the number of
>    gpfn pages it has handled (i.e., replaced with 'not present').
> 2. Making guest_remove_page() return -ENOENT in the case that the gpfn
>    in question was already empty (and in no other cases).
> 3. When looping over guest_remove_page(), expect the number of -ENOENT
>    failures to be no larger than the number of pages
>    p2m_pod_decrease_reservation() removed.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> ---
> v2: Re-written description (by George). Add comments (as suggested
>     by George). Formatting.

One thing to double-check...

> @@ -335,10 +336,8 @@ int guest_remove_page(struct domain *d,
>          rc = mem_sharing_unshare_page(d, gmfn, 0);
>          if ( rc )
>          {
> -            put_gfn(d, gmfn);
>              (void)mem_sharing_notify_enomem(d, gmfn, 0);
> -
> -            return rc;
> +            goto out_put_gfn;

I take it you've checked that moving this put_gfn() to after the
notify call is OK?

I took a brief look and it seems OK to me; so if you're happy with that
then:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>

Thanks, and sorry for the delay.

 -George


* Re: [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests
  2018-01-19 16:04     ` George Dunlap
@ 2018-01-19 16:13       ` Jan Beulich
  0 siblings, 0 replies; 19+ messages in thread
From: Jan Beulich @ 2018-01-19 16:13 UTC (permalink / raw)
  To: George Dunlap
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall, xen-devel

>>> On 19.01.18 at 17:04, <george.dunlap@citrix.com> wrote:
> On 12/20/2017 09:34 AM, Jan Beulich wrote:
>> p2m_pod_decrease_reservation() at the moment only returns a boolean
>> value: true for "nothing more to do", false for "something more to do".
>> If it returns false, decrease_reservation() will loop over the entire
>> range, calling guest_remove_page() for each page.
>> 
>> Unfortunately, in the case p2m_pod_decrease_reservation() succeeds
>> partially, some of the memory in the range will be not-present; at which
>> point guest_remove_page() will return an error, and the entire operation
>> will fail.
>> 
>> Fix this by:
>> 1. Having p2m_pod_decrease_reservation() return exactly the number of
>>    gpfn pages it has handled (i.e., replaced with 'not present').
>> 2. Making guest_remove_page() return -ENOENT in the case that the gpfn
>>    in question was already empty (and in no other cases).
>> 3. When looping over guest_remove_page(), expect the number of -ENOENT
>>    failures to be no larger than the number of pages
>>    p2m_pod_decrease_reservation() removed.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
>> ---
>> v2: Re-written description (by George). Add comments (as suggested
>>     by George). Formatting.
> 
> One thing to double-check...
> 
>> @@ -335,10 +336,8 @@ int guest_remove_page(struct domain *d,
>>          rc = mem_sharing_unshare_page(d, gmfn, 0);
>>          if ( rc )
>>          {
>> -            put_gfn(d, gmfn);
>>              (void)mem_sharing_notify_enomem(d, gmfn, 0);
>> -
>> -            return rc;
>> +            goto out_put_gfn;
> 
> I take it you've checked to make sure that moving this put_gfn() over
> the notify call is OK?

Yes; in fact the staleness of the GFN is slightly reduced by the
reference now being held across that function (i.e. the GFN is at
least not yet stale at the time the event is put on the ring). The
function doesn't use the GFN for anything other than storing its
value.
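
Concretely, the error path now ends up shaped like this (unrelated
parts of guest_remove_page() elided):

    rc = mem_sharing_unshare_page(d, gmfn, 0);
    if ( rc )
    {
        /* The GFN reference is still held across the notification. */
        (void)mem_sharing_notify_enomem(d, gmfn, 0);
        goto out_put_gfn;
    }

    /* ... */

 out_put_gfn:
    put_gfn(d, gmfn);  /* Dropped only after the event is on the ring. */

    return rc;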

> I took a brief look and it seems OK to me; so if you're happy with that
> then:
> 
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>

Thanks; it went in earlier today anyway, based on Andrew's ack
(which, admittedly, he gave over IRC rather than by mail).

Jan



* Re: [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry()
  2017-12-20  9:35   ` [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
@ 2018-01-19 17:09     ` George Dunlap
  0 siblings, 0 replies; 19+ messages in thread
From: George Dunlap @ 2018-01-19 17:09 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima

On 12/20/2017 09:35 AM, Jan Beulich wrote:
> As XSAs 246 and 247 have shown, not doing so is rather dangerous.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1550,9 +1550,11 @@ void p2m_mem_paging_populate(struct doma
>          if ( p2mt == p2m_ram_paging_out )
>              req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
>  
> -        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
> +        rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
>      }
>      gfn_unlock(p2m, gfn, 0);
> +    if ( rc < 0 )
> +        return;

On the failure path we call vm_event_claim_slot() but never release
the claimed slot.  Looks like we need an out_cancel path that calls
vm_event_cancel_slot()?
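
Something like the below, perhaps (untested sketch; I'm writing the
paging ring accessor from memory, so it would need checking against
the tree):

    gfn_unlock(p2m, gfn, 0);
    if ( rc < 0 )
        goto out_cancel;

    /* ... fill in and send the request, pause the vCPU, ... */

    return;

 out_cancel:
    /* Release the slot claimed by vm_event_claim_slot() above. */
    vm_event_cancel_slot(d, &d->vm_event->paging);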

I was going to say something about |= MEM_PAGING_EVICT_FAIL, but it
looks like that only gets used on success.

>      /* Pause domain if request came from guest and gfn has paging type */
>      if ( p2m_is_paging(p2mt) && v->domain == d )
> @@ -1700,10 +1702,12 @@ void p2m_mem_paging_resume(struct domain
>           */
>          if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
>          {
> -            p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> -                          paging_mode_log_dirty(d) ? p2m_ram_logdirty :
> -                          p2m_ram_rw, a);
> -            set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
> +            int rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> +                                   paging_mode_log_dirty(d) ? p2m_ram_logdirty :
> +                                   p2m_ram_rw, a);
> +
> +            if ( !rc )
> +                set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
>          }
>          gfn_unlock(p2m, gfn, 0);
>      }
> @@ -2463,9 +2467,9 @@ static void p2m_reset_altp2m(struct p2m_
>      p2m->max_remapped_gfn = 0;
>  }
>  
> -void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
> -                                 mfn_t mfn, unsigned int page_order,
> -                                 p2m_type_t p2mt, p2m_access_t p2ma)
> +int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
> +                                mfn_t mfn, unsigned int page_order,
> +                                p2m_type_t p2mt, p2m_access_t p2ma)
>  {
>      struct p2m_domain *p2m;
>      p2m_access_t a;
> @@ -2474,9 +2478,10 @@ void p2m_altp2m_propagate_change(struct
>      unsigned int i;
>      unsigned int reset_count = 0;
>      unsigned int last_reset_idx = ~0;
> +    int ret = 0;
>  
>      if ( !altp2m_active(d) )
> -        return;
> +        return 0;
>  
>      altp2m_list_lock(d);
>  
> @@ -2515,17 +2520,25 @@ void p2m_altp2m_propagate_change(struct
>                      p2m_unlock(p2m);
>                  }
>  
> -                goto out;
> +                ret = 0;
> +                break;
>              }
>          }
>          else if ( !mfn_eq(m, INVALID_MFN) )
> -            p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
> +        {
> +            int rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
> +
> +            /* Best effort: Don't bail on error. */
> +            if ( !ret )
> +                ret = rc;
> +        }

Hmm, this isn't great -- add this to the long list of functions that
say, "Something went wrong -- maybe nothing was done, maybe it was half
done; good luck."
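
(For the record, the shape being introduced is the usual best-effort
loop which remembers only the first error; simplified here, since the
real loop also skips inactive altp2ms:

    int ret = 0;

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        int rc = p2m_set_entry(d->arch.altp2m_p2m[i], gfn, mfn,
                               page_order, p2mt, p2ma);

        /* Keep going, but report the first failure to the caller. */
        if ( rc && !ret )
            ret = rc;
    }

    return ret;

so the caller learns *that* something failed, but not what state the
individual altp2ms were left in.)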

Wasn't there a patch floating around that would crash a domain if
p2m_set_entry() failed to break down a superpage?  I can't seem to find
that now.

Anyway, apart from the "leak" mentioned above, I think the rest of
the patch is fine; the situation isn't great, but you're not really
making it worse.

 -George


Thread overview: 19+ messages
2017-12-04 10:49 [PATCH 0/3] x86: XSA-246 / -247 follow-up Jan Beulich
2017-12-04 11:06 ` [PATCH 1/3] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
2017-12-04 15:58   ` Andrew Cooper
2017-12-05  7:42     ` Jan Beulich
2017-12-07 12:56   ` George Dunlap
2017-12-07 13:07     ` Jan Beulich
2017-12-04 11:06 ` [PATCH 2/3] x86/mm: drop yet another relic of translated PV domains from new_guest_cr3() Jan Beulich
2017-12-04 15:58   ` Andrew Cooper
2017-12-04 11:07 ` [PATCH 3/3] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
2017-12-04 16:03   ` Andrew Cooper
2017-12-05  1:47   ` Tian, Kevin
2017-12-20  9:25 ` [PATCH v2 0/2] x86: XSA-246 / -247 follow-up Jan Beulich
2017-12-20  9:34   ` [PATCH v2 1/2] x86/PoD: correctly handle non-order-0 decrease-reservation requests Jan Beulich
2018-01-18 15:59     ` Ping: " Jan Beulich
2018-01-18 16:36       ` Julien Grall
2018-01-19 16:04     ` George Dunlap
2018-01-19 16:13       ` Jan Beulich
2017-12-20  9:35   ` [PATCH v2 2/2] x86/p2m: force return value checking of p2m_set_entry() Jan Beulich
2018-01-19 17:09     ` George Dunlap
