* [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Jan Beulich @ 2016-04-29  8:13 UTC
  To: xen-devel; +Cc: Tim Deegan, Paul Durrant, Wei Liu

prepare_ring_for_helper(), just like share_xen_page_with_guest(),
takes a write reference on the page, and hence should similarly be
accounted for when determining whether to log a complaint.

This requires using recursive locking for the ioreq server lock, as the
offending invocation of sh_remove_all_mappings() is down the call stack
from hvm_set_ioreq_server_state(). (While not strictly needed to be
done in all other instances too, convert all of them for consistency.)

At once improve the usefulness of the shadow error message: Log all
values involved in triggering it as well as the GFN (to aid
understanding which guest page it is that there is a problem with - in
cases like the one here the GFN is invariant across invocations, while
the MFN obviously can [and will] vary).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -240,6 +240,30 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+{
+    const struct hvm_ioreq_server *s;
+    bool_t found = 0;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        if ( (s->ioreq.va && s->ioreq.page == page) ||
+             (s->bufioreq.va && s->bufioreq.page == page) )
+        {
+            found = 1;
+            break;
+        }
+    }
+
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return found;
+}
+
 static void hvm_remove_ioreq_gmfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
@@ -671,7 +695,7 @@ int hvm_create_ioreq_server(struct domai
         goto fail1;
 
     domain_pause(d);
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -EEXIST;
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
@@ -694,14 +718,14 @@ int hvm_create_ioreq_server(struct domai
     if ( id )
         *id = s->id;
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail3:
  fail2:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -714,7 +738,7 @@ int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -743,7 +767,7 @@ int hvm_destroy_ioreq_server(struct doma
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -756,7 +780,7 @@ int hvm_get_ioreq_server_info(struct dom
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -781,7 +805,7 @@ int hvm_get_ioreq_server_info(struct dom
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -793,7 +817,7 @@ int hvm_map_io_range_to_ioreq_server(str
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -833,7 +857,7 @@ int hvm_map_io_range_to_ioreq_server(str
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -845,7 +869,7 @@ int hvm_unmap_io_range_from_ioreq_server
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -885,7 +909,7 @@ int hvm_unmap_io_range_from_ioreq_server
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -896,7 +920,7 @@ int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each ( entry,
@@ -925,7 +949,7 @@ int hvm_set_ioreq_server_state(struct do
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
@@ -934,7 +958,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
@@ -947,7 +971,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
             goto fail;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return 0;
 
@@ -957,7 +981,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -966,21 +990,21 @@ void hvm_all_ioreq_servers_remove_vcpu(s
 {
     struct hvm_ioreq_server *s;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
     struct hvm_ioreq_server *s, *next;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1003,7 +1027,7 @@ void hvm_destroy_all_ioreq_servers(struc
         xfree(s);
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
@@ -1027,7 +1051,7 @@ int hvm_set_dm_domain(struct domain *d,
     struct hvm_ioreq_server *s;
     int rc = 0;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /*
      * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
@@ -1076,7 +1100,7 @@ int hvm_set_dm_domain(struct domain *d,
     domain_unpause(d);
 
  done:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -35,6 +35,7 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xen/numa.h>
 #include "private.h"
 
@@ -2591,7 +2592,8 @@ int sh_remove_write_access_from_sl1p(str
 /* Remove all mappings of a guest frame from the shadow tables.
  * Returns non-zero if we need to flush TLBs. */
 
-static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
+static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn,
+                                  unsigned long gfn)
 {
     struct page_info *page = mfn_to_page(gmfn);
 
@@ -2643,19 +2645,24 @@ static int sh_remove_all_mappings(struct
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings:
+        /*
+         * Don't complain if we're in HVM and there are some extra mappings:
          * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
-         * Also allow one typed refcount for xenheap pages, to match
-         * share_xen_page_with_guest(). */
+         * Also allow one typed refcount for
+         * - Xen heap pages, to match share_xen_page_with_guest(),
+         * - ioreq server pages, to match prepare_ring_for_helper().
+         */
         if ( !(shadow_mode_external(d)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
-                   == !!is_xen_heap_page(page))) )
+                   == (is_xen_heap_page(page) ||
+                       is_ioreq_server_page(d, page)))) )
         {
-            SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08lx t=%08lx\n", mfn_x(gmfn),
-                          page->count_info, page->u.inuse.type_info);
+            SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
+                          "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn,
+                          page->count_info, page->u.inuse.type_info,
+                          !!is_xen_heap_page(page), is_ioreq_server_page(d, page));
         }
     }
 
@@ -3515,7 +3522,7 @@ static void sh_unshadow_for_p2m_change(s
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
             sh_remove_all_shadows_and_parents(d, mfn);
-            if ( sh_remove_all_mappings(d, mfn) )
+            if ( sh_remove_all_mappings(d, mfn, gfn) )
                 flush_tlb_mask(d->domain_dirty_cpumask);
         }
     }
@@ -3550,7 +3557,8 @@ static void sh_unshadow_for_p2m_change(s
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(d, omfn);
-                    if ( sh_remove_all_mappings(d, omfn) )
+                    if ( sh_remove_all_mappings(d, omfn,
+                                                gfn + (i << PAGE_SHIFT)) )
                         cpumask_or(&flushmask, &flushmask,
                                    d->domain_dirty_cpumask);
                 }
@@ -3766,7 +3774,8 @@ int shadow_track_dirty_vram(struct domai
                         dirty = 1;
                         /* TODO: Heuristics for finding the single mapping of
                          * this gmfn */
-                        flush_tlb |= sh_remove_all_mappings(d, mfn);
+                        flush_tlb |= sh_remove_all_mappings(d, mfn,
+                                                            begin_pfn + i);
                     }
                     else
                     {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -21,6 +21,7 @@
 
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
 
 int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                             bool_t is_default, int bufioreq_handling,




* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Paul Durrant @ 2016-04-29  8:29 UTC
  To: Jan Beulich, xen-devel; +Cc: Tim (Xen.org), Wei Liu

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 29 April 2016 09:14
> To: xen-devel
> Cc: Paul Durrant; Wei Liu; Tim (Xen.org)
> Subject: [PATCH] x86/shadow: account for ioreq server pages before
> complaining about not found mapping
> 
> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
> takes a write reference on the page, and hence should similarly be
> accounted for when determining whether to log a complaint.
> 
> This requires using recursive locking for the ioreq server lock, as the
> offending invocation of sh_remove_all_mappings() is down the call stack
> from hvm_set_ioreq_server_state(). (While not strictly needed to be
> done in all other instances too, convert all of them for consistency.)

Do you have an example of a call stack? Is the recursion due to the domain_pause() being done with the ioreq server spinlock held?

  Paul


* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Jan Beulich @ 2016-04-29  9:21 UTC
  To: Paul Durrant; +Cc: xen-devel, Wei Liu, Tim (Xen.org)

>>> On 29.04.16 at 10:29, <Paul.Durrant@citrix.com> wrote:
>>  -----Original Message-----
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: 29 April 2016 09:14
>> To: xen-devel
>> Cc: Paul Durrant; Wei Liu; Tim (Xen.org)
>> Subject: [PATCH] x86/shadow: account for ioreq server pages before
>> complaining about not found mapping
>> 
>> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
>> takes a write reference on the page, and hence should similarly be
>> accounted for when determining whether to log a complaint.
>> 
>> This requires using recursive locking for the ioreq server lock, as the
>> offending invocation of sh_remove_all_mappings() is down the call stack
>> from hvm_set_ioreq_server_state(). (While not strictly needed to be
>> done in all other instances too, convert all of them for consistency.)
> 
> Do you have an example of a call stack?

(XEN) Xen call trace:
(XEN)    [<ffff82d08021ca61>] common.c#sh_remove_all_mappings+0x1fb/0x27d
(XEN)    [<ffff82d08021ea2a>] common.c#sh_unshadow_for_p2m_change+0xc9/0x2a8
(XEN)    [<ffff82d08021ed06>] shadow_write_p2m_entry+0xfd/0x168
(XEN)    [<ffff82d0801ffe64>] paging_write_p2m_entry+0x44/0x51
(XEN)    [<ffff82d08020c706>] p2m-pt.c#p2m_pt_set_entry+0x539/0x846
(XEN)    [<ffff82d080201e57>] p2m_set_entry+0xd2/0x113
(XEN)    [<ffff82d080202423>] p2m.c#p2m_remove_page+0x20a/0x220
(XEN)    [<ffff82d0802069c5>] guest_physmap_remove_page+0x19b/0x207
(XEN)    [<ffff82d0801db0b0>] ioreq.c#hvm_remove_ioreq_gmfn+0x4e/0x5e
(XEN)    [<ffff82d0801db123>] ioreq.c#hvm_ioreq_server_enable+0x63/0xaf
(XEN)    [<ffff82d0801db1d8>] hvm_set_ioreq_server_state+0x69/0xb3
(XEN)    [<ffff82d0801d5495>] do_hvm_op+0x56a/0x1c85
(XEN)    [<ffff82d080243f5d>] lstar_enter+0xdd/0x137

> Is the recursion due to the 
> domain_pause() being done with the ioreq server spinlock held?

I don't think the domain_pause() matters here.

Jan
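
To make the re-entrancy concrete, here is a minimal standalone sketch
(illustrative only, not Xen code) of why a plain lock would deadlock on
the path above. A pthread mutex stands in for the ioreq server
spinlock; the nesting mirrors hvm_set_ioreq_server_state() reaching
is_ioreq_server_page() with the lock already held:

    /* Build with: gcc -pthread demo.c -o demo */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock;

    /* Stands in for is_ioreq_server_page(): re-acquires the lock the
     * caller already holds. */
    static void inner(void)
    {
        pthread_mutex_lock(&lock);
        puts("inner critical section reached");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        /* With the default (non-recursive) mutex type, the nested
         * acquisition in inner() would self-deadlock, the analogue of
         * using plain spin_lock() in the trace above. */
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&lock, &attr);

        pthread_mutex_lock(&lock);   /* hvm_set_ioreq_server_state() */
        inner();                     /* ... down the call stack */
        pthread_mutex_unlock(&lock);

        return 0;
    }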



* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Paul Durrant @ 2016-04-29  9:35 UTC
  To: Jan Beulich; +Cc: xen-devel, Wei Liu, Tim (Xen.org)

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 29 April 2016 10:22
> To: Paul Durrant
> Cc: Wei Liu; xen-devel; Tim (Xen.org)
> Subject: RE: [PATCH] x86/shadow: account for ioreq server pages before
> complaining about not found mapping
> 
> >>> On 29.04.16 at 10:29, <Paul.Durrant@citrix.com> wrote:
> >>  -----Original Message-----
> >> From: Jan Beulich [mailto:JBeulich@suse.com]
> >> Sent: 29 April 2016 09:14
> >> To: xen-devel
> >> Cc: Paul Durrant; Wei Liu; Tim (Xen.org)
> >> Subject: [PATCH] x86/shadow: account for ioreq server pages before
> >> complaining about not found mapping
> >>
> >> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
> >> takes a write reference on the page, and hence should similarly be
> >> accounted for when determining whether to log a complaint.
> >>
> >> This requires using recursive locking for the ioreq server lock, as the
> >> offending invocation of sh_remove_all_mappings() is down the call stack
> >> from hvm_set_ioreq_server_state(). (While not strictly needed to be
> >> done in all other instances too, convert all of them for consistency.)
> >
> > Do you have an example of a call stack?
> 
> (XEN) Xen call trace:
> (XEN)    [<ffff82d08021ca61>]
> common.c#sh_remove_all_mappings+0x1fb/0x27d
> (XEN)    [<ffff82d08021ea2a>]
> common.c#sh_unshadow_for_p2m_change+0xc9/0x2a8
> (XEN)    [<ffff82d08021ed06>] shadow_write_p2m_entry+0xfd/0x168
> (XEN)    [<ffff82d0801ffe64>] paging_write_p2m_entry+0x44/0x51
> (XEN)    [<ffff82d08020c706>] p2m-pt.c#p2m_pt_set_entry+0x539/0x846
> (XEN)    [<ffff82d080201e57>] p2m_set_entry+0xd2/0x113
> (XEN)    [<ffff82d080202423>] p2m.c#p2m_remove_page+0x20a/0x220
> (XEN)    [<ffff82d0802069c5>] guest_physmap_remove_page+0x19b/0x207
> (XEN)    [<ffff82d0801db0b0>] ioreq.c#hvm_remove_ioreq_gmfn+0x4e/0x5e
> (XEN)    [<ffff82d0801db123>] ioreq.c#hvm_ioreq_server_enable+0x63/0xaf
> (XEN)    [<ffff82d0801db1d8>] hvm_set_ioreq_server_state+0x69/0xb3
> (XEN)    [<ffff82d0801d5495>] do_hvm_op+0x56a/0x1c85
> (XEN)    [<ffff82d080243f5d>] lstar_enter+0xdd/0x137
> 
> > Is the recursion due to the
> > domain_pause() being done with the ioreq server spinlock held?
> 
> I don't think the domain_pause() matters here.

Thanks for the stack. Yes, the domain_pause() is clearly irrelevant, and there's no trivial way to avoid the need for the recursive lock, so

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> Jan



* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Andrew Cooper @ 2016-04-29 12:28 UTC
  To: Jan Beulich, xen-devel; +Cc: Wei Liu, Paul Durrant, Tim Deegan

On 29/04/16 09:13, Jan Beulich wrote:
> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
> takes a write reference on the page, and hence should similarly be
> accounted for when determining whether to log a complaint.
>
> This requires using recursive locking for the ioreq server lock, as the
> offending invocation of sh_remove_all_mappings() is down the call stack
> from hvm_set_ioreq_server_state(). (While not strictly needed to be
> done in all other instances too, convert all of them for consistency.)
>
> At once improve the usefulness of the shadow error message: Log all
> values involved in triggering it as well as the GFN (to aid
> understanding which guest page it is that there is a problem with - in
> cases like the one here the GFN is invariant across invocations, while
> the MFN obviously can [and will] vary).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

IMO, this is a 4.7 candidate. I already had a TODO list item of working
out why the shadow code was always complaining.

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, albeit with one
further suggestion.

> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -35,6 +35,7 @@
>  #include <asm/current.h>
>  #include <asm/flushtlb.h>
>  #include <asm/shadow.h>
> +#include <asm/hvm/ioreq.h>
>  #include <xen/numa.h>
>  #include "private.h"
>  
> @@ -2591,7 +2592,8 @@ int sh_remove_write_access_from_sl1p(str
>  /* Remove all mappings of a guest frame from the shadow tables.
>   * Returns non-zero if we need to flush TLBs. */
>  
> -static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
> +static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn,
> +                                  unsigned long gfn)

It would be nice if we could use gfn_t rather than having more unsigned
longs.

~Andrew
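
For context, the typesafe wrappers referred to here are roughly of the
following shape (a simplified sketch of Xen's TYPE_SAFE pattern, not
the verbatim definitions). Wrapping the raw integer in a one-member
struct turns accidental gfn/mfn mix-ups into compile-time errors at
essentially no runtime cost:

    /* Sketch only; assumes the shape of Xen's TYPE_SAFE machinery. */
    typedef struct { unsigned long gfn; } gfn_t;

    #define _gfn(x)  ((gfn_t) { (x) })   /* raw value -> gfn_t */
    #define gfn_x(g) ((g).gfn)           /* gfn_t -> raw value */

Under that scheme the signature would hypothetically become
sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn), with
call sites passing e.g. _gfn(begin_pfn + i) and the SHADOW_ERROR
reading the raw value back via gfn_x(gfn).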


* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Jan Beulich @ 2016-04-29 12:39 UTC
  To: Andrew Cooper; +Cc: xen-devel, Paul Durrant, Wei Liu, Tim Deegan

>>> On 29.04.16 at 14:28, <andrew.cooper3@citrix.com> wrote:
> On 29/04/16 09:13, Jan Beulich wrote:
>> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
>> takes a write reference on the page, and hence should similarly be
>> accounted for when determining whether to log a complaint.
>>
>> This requires using recursive locking for the ioreq server lock, as the
>> offending invocation of sh_remove_all_mappings() is down the call stack
>> from hvm_set_ioreq_server_state(). (While not strictly needed to be
>> done in all other instances too, convert all of them for consistency.)
>>
>> At once improve the usefulness of the shadow error message: Log all
>> values involved in triggering it as well as the GFN (to aid
>> understanding which guest page it is that there is a problem with - in
>> cases like the one here the GFN is invariant across invocations, while
>> the MFN obviously can [and will] vary).
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> IMO, this is a 4.7 candidate. I already had a TODO list item of working
> out why the shadow code was always complaining.

Definitely (hence I had copied Wei).

> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, albeit with one
> further suggestion.
> 
>> --- a/xen/arch/x86/mm/shadow/common.c
>> +++ b/xen/arch/x86/mm/shadow/common.c
>> @@ -35,6 +35,7 @@
>>  #include <asm/current.h>
>>  #include <asm/flushtlb.h>
>>  #include <asm/shadow.h>
>> +#include <asm/hvm/ioreq.h>
>>  #include <xen/numa.h>
>>  #include "private.h"
>>  
>> @@ -2591,7 +2592,8 @@ int sh_remove_write_access_from_sl1p(str
>>  /* Remove all mappings of a guest frame from the shadow tables.
>>   * Returns non-zero if we need to flush TLBs. */
>>  
>> -static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
>> +static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn,
>> +                                  unsigned long gfn)
> 
> It would be nice if we could use gfn_t rather than having more unsigned
> longs.

I generally agree, but intentionally didn't to match all the other
shadow code. I'll make changing this dependent on what Tim
thinks.

Jan



* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Wei Liu @ 2016-04-29 13:12 UTC
  To: Jan Beulich; +Cc: xen-devel, Paul Durrant, Wei Liu, Tim Deegan

On Fri, Apr 29, 2016 at 02:13:35AM -0600, Jan Beulich wrote:
> prepare_ring_for_helper(), just like share_xen_page_with_guest(),
> takes a write reference on the page, and hence should similarly be
> accounted for when determining whether to log a complaint.
> 
> This requires using recursive locking for the ioreq server lock, as the
> offending invocation of sh_remove_all_mappings() is down the call stack
> from hvm_set_ioreq_server_state(). (While not strictly needed to be
> done in all other instances too, convert all of them for consistency.)
> 
> At once improve the usefulness of the shadow error message: Log all
> values involved in triggering it as well as the GFN (to aid
> understanding which guest page it is that there is a problem with - in
> cases like the one here the GFN is invariant across invocations, while
> the MFN obviously can [and will] vary).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Release-acked-by: Wei Liu <wei.liu2@citrix.com>


* Re: [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping
From: Andrew Cooper @ 2016-04-29 17:21 UTC
  To: Tim Deegan, Jan Beulich; +Cc: xen-devel, Paul Durrant, Wei Liu

On 29/04/16 18:10, Tim Deegan wrote:
> At 06:39 -0600 on 29 Apr (1461911995), Jan Beulich wrote:
>>>>> On 29.04.16 at 14:28, <andrew.cooper3@citrix.com> wrote:
>>> It would be nice if we could use gfn_t rather than having more unsigned
>>> longs.
>> I generally agree, but intentionally didn't to match all the other
>> shadow code. I'll make changing this dependent on what Tim
>> thinks.
> Is gfn_t even available here?

Yes.  It is available everywhere since c/s 177bd5fb "mem: expose
typesafe mfns/gfns/pfns to common code"  (I wonder who acked a patch
like that ;p)

~Andrew

