All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-26 17:21 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	Jan Beulich, Roger Pau Monne

Calling _put_page_type while also holding the page_lock
for that page can cause a deadlock.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
---
v3: simplified patch by keeping the additional references already in-place
---
 xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index dfc279d371..e2f74ac770 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EBUSY;
     }
 
-    /* We can only change the type if count is one */
-    /* Because we are locking pages individually, we need to drop
-     * the lock here, while the page is typed. We cannot risk the 
-     * race of page_unlock and then put_page_type. */
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
@@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EEXIST;
     }
 
-    /* Drop the final typecount */
-    put_page_and_type(page);
-
-    /* Now that we've dropped the type, we can unlock */
+    /* Page is still typed; unlock before the final put to avoid deadlock */
     mem_sharing_page_unlock(page);
 
+    /* Drop the final typecount */
+    put_page_and_type(page);
+
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
@@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
@@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     mem_sharing_page_unlock(secondpg);
     mem_sharing_page_unlock(firstpg);
 
+    BUG_ON(!put_count);
+    while ( put_count-- )
+        put_page_and_type(cpage);
+
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
@@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
         mem_sharing_page_unlock(page);
+        put_page_and_type(page);
         if ( last_gfn )
         {
             if ( !get_page(page, dom_cow) )
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-26 17:21 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	Jan Beulich, Roger Pau Monne

Calling _put_page_type while also holding the page_lock
for that page can cause a deadlock.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
---
v3: simplified patch by keeping the additional references already in-place
---
 xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index dfc279d371..e2f74ac770 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EBUSY;
     }
 
-    /* We can only change the type if count is one */
-    /* Because we are locking pages individually, we need to drop
-     * the lock here, while the page is typed. We cannot risk the 
-     * race of page_unlock and then put_page_type. */
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
@@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EEXIST;
     }
 
-    /* Drop the final typecount */
-    put_page_and_type(page);
-
-    /* Now that we've dropped the type, we can unlock */
+    /* Page is still typed; unlock before the final put to avoid deadlock */
     mem_sharing_page_unlock(page);
 
+    /* Drop the final typecount */
+    put_page_and_type(page);
+
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
@@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
@@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     mem_sharing_page_unlock(secondpg);
     mem_sharing_page_unlock(firstpg);
 
+    BUG_ON(!put_count);
+    while ( put_count-- )
+        put_page_and_type(cpage);
+
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
@@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
         mem_sharing_page_unlock(page);
+        put_page_and_type(page);
         if ( last_gfn )
         {
             if ( !get_page(page, dom_cow) )
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	Jan Beulich, Roger Pau Monne

Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
ordering" added extra sanity checking to page_lock/page_unlock for debug builds
with the assumption that no hypervisor path ever locks two pages at once.

This assumption doesn't hold during memory sharing so we introduce separate
functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
in the memory sharing subsystem.

Also placing these functions behind their appropriate kconfig gates.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
v3: this patch was "x86/mm: conditionally check page_lock/page_unlock ownership"
---
 xen/arch/x86/mm.c             | 46 ++++++++++++++++++++++++++++-------
 xen/arch/x86/mm/mem_sharing.c |  4 +--
 xen/include/asm-x86/mm.h      |  6 ++++-
 3 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 45fadbab61..c2c92a96ac 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
 #define current_locked_page_ne_check(x) true
 #endif
 
-int page_lock(struct page_info *page)
+#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
+static int _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
 
-    ASSERT(current_locked_page_check(NULL));
-
     do {
         while ( (x = page->u.inuse.type_info) & PGT_locked )
             cpu_relax();
@@ -2046,17 +2045,13 @@ int page_lock(struct page_info *page)
             return 0;
     } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
 
-    current_locked_page_set(page);
-
     return 1;
 }
 
-void page_unlock(struct page_info *page)
+static void _page_unlock(struct page_info *page)
 {
     unsigned long x, nx, y = page->u.inuse.type_info;
 
-    ASSERT(current_locked_page_check(page));
-
     do {
         x = y;
         ASSERT((x & PGT_count_mask) && (x & PGT_locked));
@@ -2065,11 +2060,44 @@ void page_unlock(struct page_info *page)
         /* We must not drop the last reference here. */
         ASSERT(nx & PGT_count_mask);
     } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
+}
+#endif
 
-    current_locked_page_set(NULL);
+#ifdef CONFIG_HAS_MEM_SHARING
+int page_lock_memshr(struct page_info *page)
+{
+    return _page_lock(page);
 }
 
+void page_unlock_memshr(struct page_info *page)
+{
+    _page_unlock(page);
+}
+#endif
+
 #ifdef CONFIG_PV
+int page_lock(struct page_info *page)
+{
+    int rc;
+
+    ASSERT(current_locked_page_check(NULL));
+
+    rc = _page_lock(page);
+
+    current_locked_page_set(page);
+
+    return rc;
+}
+
+void page_unlock(struct page_info *page)
+{
+    ASSERT(current_locked_page_check(page));
+
+    _page_unlock(page);
+
+    current_locked_page_set(NULL);
+}
+
 /*
  * PTE flags that a guest may change without re-validating the PTE.
  * All other bits affect translation, caching, or Xen's safety.
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e2f74ac770..4b60bab28b 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -118,7 +118,7 @@ static inline int mem_sharing_page_lock(struct page_info *pg)
     pg_lock_data_t *pld = &(this_cpu(__pld));
 
     page_sharing_mm_pre_lock();
-    rc = page_lock(pg);
+    rc = page_lock_memshr(pg);
     if ( rc )
     {
         preempt_disable();
@@ -135,7 +135,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
     page_sharing_mm_unlock(pld->mm_unlock_level, 
                            &pld->recurse_count);
     preempt_enable();
-    page_unlock(pg);
+    page_unlock_memshr(pg);
 }
 
 static inline shr_handle_t get_next_handle(void)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 6faa563167..ba49eee24d 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -356,7 +356,8 @@ struct platform_bad_page {
 const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 
 /* Per page locks:
- * page_lock() is used for two purposes: pte serialization, and memory sharing.
+ * page_lock() is used for pte serialization.
+ * page_lock_memshr() is used for memory sharing.
  *
  * All users of page lock for pte serialization live in mm.c, use it
  * to lock a page table page during pte updates, do not take other locks within
@@ -378,6 +379,9 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 int page_lock(struct page_info *page);
 void page_unlock(struct page_info *page);
 
+int page_lock_memshr(struct page_info *page);
+void page_unlock_memshr(struct page_info *page);
+
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
 int  put_page_type_preemptible(struct page_info *page);
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	Jan Beulich, Roger Pau Monne

Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
ordering" added extra sanity checking to page_lock/page_unlock for debug builds
with the assumption that no hypervisor path ever locks two pages at once.

This assumption doesn't hold during memory sharing so we introduce separate
functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
in the memory sharing subsystem.

Also placing these functions behind their appropriate kconfig gates.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
v3: this patch was "x86/mm: conditionally check page_lock/page_unlock ownership"
---
 xen/arch/x86/mm.c             | 46 ++++++++++++++++++++++++++++-------
 xen/arch/x86/mm/mem_sharing.c |  4 +--
 xen/include/asm-x86/mm.h      |  6 ++++-
 3 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 45fadbab61..c2c92a96ac 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
 #define current_locked_page_ne_check(x) true
 #endif
 
-int page_lock(struct page_info *page)
+#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
+static int _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
 
-    ASSERT(current_locked_page_check(NULL));
-
     do {
         while ( (x = page->u.inuse.type_info) & PGT_locked )
             cpu_relax();
@@ -2046,17 +2045,13 @@ int page_lock(struct page_info *page)
             return 0;
     } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
 
-    current_locked_page_set(page);
-
     return 1;
 }
 
-void page_unlock(struct page_info *page)
+static void _page_unlock(struct page_info *page)
 {
     unsigned long x, nx, y = page->u.inuse.type_info;
 
-    ASSERT(current_locked_page_check(page));
-
     do {
         x = y;
         ASSERT((x & PGT_count_mask) && (x & PGT_locked));
@@ -2065,11 +2060,44 @@ void page_unlock(struct page_info *page)
         /* We must not drop the last reference here. */
         ASSERT(nx & PGT_count_mask);
     } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
+}
+#endif
 
-    current_locked_page_set(NULL);
+#ifdef CONFIG_HAS_MEM_SHARING
+int page_lock_memshr(struct page_info *page)
+{
+    return _page_lock(page);
 }
 
+void page_unlock_memshr(struct page_info *page)
+{
+    _page_unlock(page);
+}
+#endif
+
 #ifdef CONFIG_PV
+int page_lock(struct page_info *page)
+{
+    int rc;
+
+    ASSERT(current_locked_page_check(NULL));
+
+    rc = _page_lock(page);
+
+    current_locked_page_set(page);
+
+    return rc;
+}
+
+void page_unlock(struct page_info *page)
+{
+    ASSERT(current_locked_page_check(page));
+
+    _page_unlock(page);
+
+    current_locked_page_set(NULL);
+}
+
 /*
  * PTE flags that a guest may change without re-validating the PTE.
  * All other bits affect translation, caching, or Xen's safety.
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e2f74ac770..4b60bab28b 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -118,7 +118,7 @@ static inline int mem_sharing_page_lock(struct page_info *pg)
     pg_lock_data_t *pld = &(this_cpu(__pld));
 
     page_sharing_mm_pre_lock();
-    rc = page_lock(pg);
+    rc = page_lock_memshr(pg);
     if ( rc )
     {
         preempt_disable();
@@ -135,7 +135,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
     page_sharing_mm_unlock(pld->mm_unlock_level, 
                            &pld->recurse_count);
     preempt_enable();
-    page_unlock(pg);
+    page_unlock_memshr(pg);
 }
 
 static inline shr_handle_t get_next_handle(void)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 6faa563167..ba49eee24d 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -356,7 +356,8 @@ struct platform_bad_page {
 const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 
 /* Per page locks:
- * page_lock() is used for two purposes: pte serialization, and memory sharing.
+ * page_lock() is used for pte serialization.
+ * page_lock_memshr() is used for memory sharing.
  *
  * All users of page lock for pte serialization live in mm.c, use it
  * to lock a page table page during pte updates, do not take other locks within
@@ -378,6 +379,9 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 int page_lock(struct page_info *page);
 void page_unlock(struct page_info *page);
 
+int page_lock_memshr(struct page_info *page);
+void page_unlock_memshr(struct page_info *page);
+
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
 int  put_page_type_preemptible(struct page_info *page);
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Tamas K Lengyel, Wei Liu, Jan Beulich, Roger Pau Monne

Improves performance for release builds.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
---
 xen/include/asm-x86/mem_sharing.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index 0e77b7d935..52ea91efa0 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -25,7 +25,9 @@
 #include <public/memory.h>
 
 /* Auditing of memory sharing code? */
+#ifndef NDEBUG
 #define MEM_SHARING_AUDIT 1
+#endif
 
 typedef uint64_t shr_handle_t; 
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [Xen-devel] [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Tamas K Lengyel, Wei Liu, Jan Beulich, Roger Pau Monne

Improves performance for release builds.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
---
 xen/include/asm-x86/mem_sharing.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index 0e77b7d935..52ea91efa0 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -25,7 +25,9 @@
 #include <public/memory.h>
 
 /* Auditing of memory sharing code? */
+#ifndef NDEBUG
 #define MEM_SHARING_AUDIT 1
+#endif
 
 typedef uint64_t shr_handle_t; 
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, Konrad Rzeszutek Wilk, George Dunlap,
	Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
	Stefano Stabellini, Jan Beulich, Roger Pau Monne

Disable it by default as it is only an experimental subsystem.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/Kconfig              |  8 +++++++-
 xen/arch/x86/domctl.c             |  2 ++
 xen/arch/x86/mm.c                 |  4 ++--
 xen/arch/x86/mm/Makefile          |  2 +-
 xen/arch/x86/x86_64/compat/mm.c   |  2 ++
 xen/arch/x86/x86_64/mm.c          |  2 ++
 xen/common/Kconfig                |  3 ---
 xen/common/domain.c               |  2 +-
 xen/common/grant_table.c          |  2 +-
 xen/common/memory.c               |  2 +-
 xen/common/vm_event.c             |  6 +++---
 xen/include/asm-x86/mem_sharing.h | 31 +++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h          |  3 +++
 xen/include/xen/sched.h           |  2 +-
 xen/include/xsm/dummy.h           |  2 +-
 xen/include/xsm/xsm.h             |  4 ++--
 xen/xsm/dummy.c                   |  2 +-
 xen/xsm/flask/hooks.c             |  4 ++--
 18 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 4b8b07b549..af7c25543f 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -17,7 +17,6 @@ config X86
 	select HAS_KEXEC
 	select MEM_ACCESS_ALWAYS_ON
 	select HAS_MEM_PAGING
-	select HAS_MEM_SHARING
 	select HAS_NS16550
 	select HAS_PASSTHROUGH
 	select HAS_PCI
@@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
 	  firmware, and will not function correctly in other scenarios.
 
 	  If unsure, say N.
+
+config MEM_SHARING
+	bool
+	default n
+	depends on HVM
+	prompt "Xen memory sharing support" if EXPERT = "y"
+
 endmenu
 
 source "common/Kconfig"
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 9bf2d0820f..bc9e024ccc 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1231,9 +1231,11 @@ long arch_do_domctl(
         break;
     }
 
+#ifdef CONFIG_MEM_SHARING
     case XEN_DOMCTL_mem_sharing_op:
         ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
         break;
+#endif
 
 #if P2M_AUDIT && defined(CONFIG_HVM)
     case XEN_DOMCTL_audit_p2m:
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c2c92a96ac..c3b0f3115c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,7 +2030,7 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
 #define current_locked_page_ne_check(x) true
 #endif
 
-#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
+#if defined(CONFIG_PV) || defined(CONFIG_MEM_SHARING)
 static int _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
@@ -2063,7 +2063,7 @@ static void _page_unlock(struct page_info *page)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 int page_lock_memshr(struct page_info *page)
 {
     return _page_lock(page);
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 5a17646f98..5010a29d6c 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_SHADOW_PAGING) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_MEM_ACCESS) += mem_access.o
 obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
 obj-y += p2m.o p2m-pt.o
 obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
 obj-y += paging.o
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 32410ed273..d4c6be3032 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -152,8 +152,10 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
+#ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
         return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
 
     default:
         rc = -ENOSYS;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d8f558bc3a..51d1d511f2 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -993,8 +993,10 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
+#ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
         return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
 
     default:
         rc = -ENOSYS;
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index c838506241..80575cac10 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -45,9 +45,6 @@ config MEM_ACCESS
 config HAS_MEM_PAGING
 	bool
 
-config HAS_MEM_SHARING
-	bool
-
 config HAS_PDX
 	bool
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 88bbe984bc..bb072cf93f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -926,7 +926,7 @@ static void complete_domain_destroy(struct rcu_head *head)
     xfree(d->vm_event_paging);
 #endif
     xfree(d->vm_event_monitor);
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     xfree(d->vm_event_share);
 #endif
 
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 80728ea57d..6c40dccae9 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3760,7 +3760,7 @@ void grant_table_init_vcpu(struct vcpu *v)
     v->maptrack_tail = MAPTRACK_TAIL;
 }
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
                             gfn_t *gfn, uint16_t *status)
 {
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 86567e6117..915f2cee1a 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1676,7 +1676,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
         return -EAGAIN;
     }
 #endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
     {
         if ( page )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 6e68be47bc..163a671cea 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -544,7 +544,7 @@ static void monitor_notification(struct vcpu *v, unsigned int port)
     vm_event_resume(v->domain, v->domain->vm_event_monitor);
 }
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
@@ -574,7 +574,7 @@ void vm_event_cleanup(struct domain *d)
         destroy_waitqueue_head(&d->vm_event_monitor->wq);
         (void)vm_event_disable(d, &d->vm_event_monitor);
     }
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     if ( vm_event_check_ring(d->vm_event_share) )
     {
         destroy_waitqueue_head(&d->vm_event_share->wq);
@@ -720,7 +720,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
     }
     break;
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     case XEN_DOMCTL_VM_EVENT_OP_SHARING:
     {
         rc = -EINVAL;
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index 52ea91efa0..e0f8792f52 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -24,6 +24,8 @@
 #include <public/domctl.h>
 #include <public/memory.h>
 
+#ifdef CONFIG_MEM_SHARING
+
 /* Auditing of memory sharing code? */
 #ifndef NDEBUG
 #define MEM_SHARING_AUDIT 1
@@ -98,4 +100,33 @@ void mem_sharing_init(void);
  */
 int relinquish_shared_pages(struct domain *d);
 
+#else
+
+static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
+{
+    return 0;
+}
+static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+    return 0;
+}
+static inline int mem_sharing_unshare_page(struct domain *d,
+                                           unsigned long gfn,
+                                           uint16_t flags)
+{
+    return 0;
+}
+static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+                              bool allow_sleep)
+{
+    return 0;
+}
+static inline int relinquish_shared_pages(struct domain *d)
+{
+    return 0;
+}
+static inline void mem_sharing_init(void) {}
+
+#endif
+
 #endif /* __MEM_SHARING_H__ */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index ba49eee24d..a3c754cb2f 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -127,6 +127,8 @@ struct page_info
         /* For non-pinnable single-page shadows, a higher entry that points
          * at us. */
         paddr_t up;
+
+#ifdef CONFIG_MEM_SHARING
         /* For shared/sharable pages, we use a doubly-linked list
          * of all the {pfn,domain} pairs that map this page. We also include
          * an opaque handle, which is effectively a version, so that clients
@@ -134,6 +136,7 @@ struct page_info
          * This list is allocated and freed when a page is shared/unshared.
          */
         struct page_sharing_info *sharing;
+#endif
     };
 
     /* Reference count and various PGC_xxx flags and fields. */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 748bb0f2f9..17cf8785fb 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -462,7 +462,7 @@ struct domain
     /* Various vm_events */
 
     /* Memory sharing support */
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     struct vm_event_domain *vm_event_share;
 #endif
     /* Memory paging support */
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index e628b1c6af..8afdec9fe8 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -603,7 +603,7 @@ static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 8a78d8abd3..8ec6b1a6e8 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -151,7 +151,7 @@ struct xsm_operations {
     int (*mem_paging) (struct domain *d);
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     int (*mem_sharing) (struct domain *d);
 #endif
 
@@ -603,7 +603,7 @@ static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->mem_sharing(d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 1fe0e746fa..6158dce814 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -129,7 +129,7 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, mem_paging);
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     set_to_dummy_if_null(ops, mem_sharing);
 #endif
 
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 3d00c747f6..f5f3b42e6e 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1270,7 +1270,7 @@ static int flask_mem_paging(struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static int flask_mem_sharing(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
@@ -1838,7 +1838,7 @@ static struct xsm_operations flask_ops = {
     .mem_paging = flask_mem_paging,
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     .mem_sharing = flask_mem_sharing,
 #endif
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* [Xen-devel] [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-26 17:21   ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-26 17:21 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, Konrad Rzeszutek Wilk, George Dunlap,
	Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
	Stefano Stabellini, Jan Beulich, Roger Pau Monne

Disable it by default as it is only an experimental subsystem.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/Kconfig              |  8 +++++++-
 xen/arch/x86/domctl.c             |  2 ++
 xen/arch/x86/mm.c                 |  4 ++--
 xen/arch/x86/mm/Makefile          |  2 +-
 xen/arch/x86/x86_64/compat/mm.c   |  2 ++
 xen/arch/x86/x86_64/mm.c          |  2 ++
 xen/common/Kconfig                |  3 ---
 xen/common/domain.c               |  2 +-
 xen/common/grant_table.c          |  2 +-
 xen/common/memory.c               |  2 +-
 xen/common/vm_event.c             |  6 +++---
 xen/include/asm-x86/mem_sharing.h | 31 +++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h          |  3 +++
 xen/include/xen/sched.h           |  2 +-
 xen/include/xsm/dummy.h           |  2 +-
 xen/include/xsm/xsm.h             |  4 ++--
 xen/xsm/dummy.c                   |  2 +-
 xen/xsm/flask/hooks.c             |  4 ++--
 18 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 4b8b07b549..af7c25543f 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -17,7 +17,6 @@ config X86
 	select HAS_KEXEC
 	select MEM_ACCESS_ALWAYS_ON
 	select HAS_MEM_PAGING
-	select HAS_MEM_SHARING
 	select HAS_NS16550
 	select HAS_PASSTHROUGH
 	select HAS_PCI
@@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
 	  firmware, and will not function correctly in other scenarios.
 
 	  If unsure, say N.
+
+config MEM_SHARING
+    bool
+    default n
+    depends on HVM
+    prompt "Xen memory sharing support" if EXPERT = "y"
+
 endmenu
 
 source "common/Kconfig"
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 9bf2d0820f..bc9e024ccc 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1231,9 +1231,11 @@ long arch_do_domctl(
         break;
     }
 
+#ifdef CONFIG_MEM_SHARING
     case XEN_DOMCTL_mem_sharing_op:
         ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
         break;
+#endif
 
 #if P2M_AUDIT && defined(CONFIG_HVM)
     case XEN_DOMCTL_audit_p2m:
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c2c92a96ac..c3b0f3115c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,7 +2030,7 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
 #define current_locked_page_ne_check(x) true
 #endif
 
-#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
+#if defined(CONFIG_PV) || defined(CONFIG_MEM_SHARING)
 static int _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
@@ -2063,7 +2063,7 @@ static void _page_unlock(struct page_info *page)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 int page_lock_memshr(struct page_info *page)
 {
     return _page_lock(page);
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 5a17646f98..5010a29d6c 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_SHADOW_PAGING) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
 obj-$(CONFIG_MEM_ACCESS) += mem_access.o
 obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
 obj-y += p2m.o p2m-pt.o
 obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
 obj-y += paging.o
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 32410ed273..d4c6be3032 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -152,8 +152,10 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
+#ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
         return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
 
     default:
         rc = -ENOSYS;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d8f558bc3a..51d1d511f2 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -993,8 +993,10 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_paging_op:
         return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
 
+#ifdef CONFIG_MEM_SHARING
     case XENMEM_sharing_op:
         return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
 
     default:
         rc = -ENOSYS;
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index c838506241..80575cac10 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -45,9 +45,6 @@ config MEM_ACCESS
 config HAS_MEM_PAGING
 	bool
 
-config HAS_MEM_SHARING
-	bool
-
 config HAS_PDX
 	bool
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 88bbe984bc..bb072cf93f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -926,7 +926,7 @@ static void complete_domain_destroy(struct rcu_head *head)
     xfree(d->vm_event_paging);
 #endif
     xfree(d->vm_event_monitor);
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     xfree(d->vm_event_share);
 #endif
 
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 80728ea57d..6c40dccae9 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3760,7 +3760,7 @@ void grant_table_init_vcpu(struct vcpu *v)
     v->maptrack_tail = MAPTRACK_TAIL;
 }
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
                             gfn_t *gfn, uint16_t *status)
 {
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 86567e6117..915f2cee1a 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1676,7 +1676,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
         return -EAGAIN;
     }
 #endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
     {
         if ( page )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 6e68be47bc..163a671cea 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -544,7 +544,7 @@ static void monitor_notification(struct vcpu *v, unsigned int port)
     vm_event_resume(v->domain, v->domain->vm_event_monitor);
 }
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
@@ -574,7 +574,7 @@ void vm_event_cleanup(struct domain *d)
         destroy_waitqueue_head(&d->vm_event_monitor->wq);
         (void)vm_event_disable(d, &d->vm_event_monitor);
     }
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     if ( vm_event_check_ring(d->vm_event_share) )
     {
         destroy_waitqueue_head(&d->vm_event_share->wq);
@@ -720,7 +720,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
     }
     break;
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     case XEN_DOMCTL_VM_EVENT_OP_SHARING:
     {
         rc = -EINVAL;
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index 52ea91efa0..e0f8792f52 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -24,6 +24,8 @@
 #include <public/domctl.h>
 #include <public/memory.h>
 
+#ifdef CONFIG_MEM_SHARING
+
 /* Auditing of memory sharing code? */
 #ifndef NDEBUG
 #define MEM_SHARING_AUDIT 1
@@ -98,4 +100,33 @@ void mem_sharing_init(void);
  */
 int relinquish_shared_pages(struct domain *d);
 
+#else
+
+static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
+{
+    return 0;
+}
+static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+    return 0;
+}
+static inline int mem_sharing_unshare_page(struct domain *d,
+                                           unsigned long gfn,
+                                           uint16_t flags)
+{
+    return 0;
+}
+static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+                              bool allow_sleep)
+{
+    return 0;
+}
+static inline int relinquish_shared_pages(struct domain *d)
+{
+    return 0;
+}
+static inline void mem_sharing_init(void) {}
+
+#endif
+
 #endif /* __MEM_SHARING_H__ */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index ba49eee24d..a3c754cb2f 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -127,6 +127,8 @@ struct page_info
         /* For non-pinnable single-page shadows, a higher entry that points
          * at us. */
         paddr_t up;
+
+#ifdef CONFIG_MEM_SHARING
         /* For shared/sharable pages, we use a doubly-linked list
          * of all the {pfn,domain} pairs that map this page. We also include
          * an opaque handle, which is effectively a version, so that clients
@@ -134,6 +136,7 @@ struct page_info
          * This list is allocated and freed when a page is shared/unshared.
          */
         struct page_sharing_info *sharing;
+#endif
     };
 
     /* Reference count and various PGC_xxx flags and fields. */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 748bb0f2f9..17cf8785fb 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -462,7 +462,7 @@ struct domain
     /* Various vm_events */
 
     /* Memory sharing support */
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     struct vm_event_domain *vm_event_share;
 #endif
     /* Memory paging support */
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index e628b1c6af..8afdec9fe8 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -603,7 +603,7 @@ static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 8a78d8abd3..8ec6b1a6e8 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -151,7 +151,7 @@ struct xsm_operations {
     int (*mem_paging) (struct domain *d);
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     int (*mem_sharing) (struct domain *d);
 #endif
 
@@ -603,7 +603,7 @@ static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
 {
     return xsm_ops->mem_sharing(d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 1fe0e746fa..6158dce814 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -129,7 +129,7 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, mem_paging);
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     set_to_dummy_if_null(ops, mem_sharing);
 #endif
 
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 3d00c747f6..f5f3b42e6e 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1270,7 +1270,7 @@ static int flask_mem_paging(struct domain *d)
 }
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
 static int flask_mem_sharing(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
@@ -1838,7 +1838,7 @@ static struct xsm_operations flask_ops = {
     .mem_paging = flask_mem_paging,
 #endif
 
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
     .mem_sharing = flask_mem_sharing,
 #endif
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:32   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:32 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> Calling _put_page_type while also holding the page_lock
> for that page can cause a deadlock.
> 
> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Roger Pau Monne <roger.pau@citrix.com>
> ---
> v3: simplified patch by keeping the additional references already in-place
> ---
>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index dfc279d371..e2f74ac770 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EBUSY;
>      }
>  
> -    /* We can only change the type if count is one */
> -    /* Because we are locking pages individually, we need to drop
> -     * the lock here, while the page is typed. We cannot risk the 
> -     * race of page_unlock and then put_page_type. */
>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>      if ( page->u.inuse.type_info != expected_type )
>      {
> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EEXIST;
>      }
>  
> -    /* Drop the final typecount */
> -    put_page_and_type(page);
> -
>      /* Now that we've dropped the type, we can unlock */
>      mem_sharing_page_unlock(page);
>  
> +    /* Drop the final typecount */
> +    put_page_and_type(page);
> +
>      /* Change the owner */
>      ASSERT(page_get_owner(page) == dom_cow);
>      page_set_owner(page, d);
> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      p2m_type_t smfn_type, cmfn_type;
>      struct two_gfns tg;
>      struct rmap_iterator ri;
> +    unsigned long put_count = 0;
>  
>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>           * Don't change the type of rmap for the client page. */
>          rmap_del(gfn, cpage, 0);
>          rmap_add(gfn, spage);
> -        put_page_and_type(cpage);
> +        put_count++;
>          d = get_domain_by_id(gfn->domain);
>          BUG_ON(!d);
>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      mem_sharing_page_unlock(secondpg);
>      mem_sharing_page_unlock(firstpg);
>  
> +    BUG_ON(!put_count);
> +    while ( put_count-- )
> +        put_page_and_type(cpage);
> +
>      /* Free the client page */
>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>          put_page(cpage);
> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>      {
>          if ( !last_gfn )
>              mem_sharing_gfn_destroy(page, d, gfn_info);
> -        put_page_and_type(page);
>          mem_sharing_page_unlock(page);
> +        put_page_and_type(page);
>          if ( last_gfn )
>          {
>              if ( !get_page(page, dom_cow) )

...Probably should have mentioned that this needs to be applied after
your other patch. :-)

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:32   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:32 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> Calling _put_page_type while also holding the page_lock
> for that page can cause a deadlock.
> 
> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Roger Pau Monne <roger.pau@citrix.com>
> ---
> v3: simplified patch by keeping the additional references already in-place
> ---
>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index dfc279d371..e2f74ac770 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EBUSY;
>      }
>  
> -    /* We can only change the type if count is one */
> -    /* Because we are locking pages individually, we need to drop
> -     * the lock here, while the page is typed. We cannot risk the 
> -     * race of page_unlock and then put_page_type. */
>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>      if ( page->u.inuse.type_info != expected_type )
>      {
> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EEXIST;
>      }
>  
> -    /* Drop the final typecount */
> -    put_page_and_type(page);
> -
>      /* Now that we've dropped the type, we can unlock */
>      mem_sharing_page_unlock(page);
>  
> +    /* Drop the final typecount */
> +    put_page_and_type(page);
> +
>      /* Change the owner */
>      ASSERT(page_get_owner(page) == dom_cow);
>      page_set_owner(page, d);
> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      p2m_type_t smfn_type, cmfn_type;
>      struct two_gfns tg;
>      struct rmap_iterator ri;
> +    unsigned long put_count = 0;
>  
>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>           * Don't change the type of rmap for the client page. */
>          rmap_del(gfn, cpage, 0);
>          rmap_add(gfn, spage);
> -        put_page_and_type(cpage);
> +        put_count++;
>          d = get_domain_by_id(gfn->domain);
>          BUG_ON(!d);
>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      mem_sharing_page_unlock(secondpg);
>      mem_sharing_page_unlock(firstpg);
>  
> +    BUG_ON(!put_count);
> +    while ( put_count-- )
> +        put_page_and_type(cpage);
> +
>      /* Free the client page */
>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>          put_page(cpage);
> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>      {
>          if ( !last_gfn )
>              mem_sharing_gfn_destroy(page, d, gfn_info);
> -        put_page_and_type(page);
>          mem_sharing_page_unlock(page);
> +        put_page_and_type(page);
>          if ( last_gfn )
>          {
>              if ( !get_page(page, dom_cow) )

...Probably should have mentioned that this needs to be applied after
your other patch. :-)

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:46     ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:46 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/29/19 3:32 PM, George Dunlap wrote:
> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>> Calling _put_page_type while also holding the page_lock
>> for that page can cause a deadlock.
>>
>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>> Cc: Jan Beulich <jbeulich@suse.com>
>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>> Cc: Wei Liu <wei.liu2@citrix.com>
>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>> ---
>> v3: simplified patch by keeping the additional references already in-place
>> ---
>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>
>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>> index dfc279d371..e2f74ac770 100644
>> --- a/xen/arch/x86/mm/mem_sharing.c
>> +++ b/xen/arch/x86/mm/mem_sharing.c
>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>          return -EBUSY;
>>      }
>>  
>> -    /* We can only change the type if count is one */
>> -    /* Because we are locking pages individually, we need to drop
>> -     * the lock here, while the page is typed. We cannot risk the 
>> -     * race of page_unlock and then put_page_type. */
>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>      if ( page->u.inuse.type_info != expected_type )
>>      {
>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>          return -EEXIST;
>>      }
>>  
>> -    /* Drop the final typecount */
>> -    put_page_and_type(page);
>> -
>>      /* Now that we've dropped the type, we can unlock */
>>      mem_sharing_page_unlock(page);
>>  
>> +    /* Drop the final typecount */
>> +    put_page_and_type(page);
>> +
>>      /* Change the owner */
>>      ASSERT(page_get_owner(page) == dom_cow);
>>      page_set_owner(page, d);
>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>      p2m_type_t smfn_type, cmfn_type;
>>      struct two_gfns tg;
>>      struct rmap_iterator ri;
>> +    unsigned long put_count = 0;
>>  
>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>           * Don't change the type of rmap for the client page. */
>>          rmap_del(gfn, cpage, 0);
>>          rmap_add(gfn, spage);
>> -        put_page_and_type(cpage);
>> +        put_count++;
>>          d = get_domain_by_id(gfn->domain);
>>          BUG_ON(!d);
>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>      mem_sharing_page_unlock(secondpg);
>>      mem_sharing_page_unlock(firstpg);
>>  
>> +    BUG_ON(!put_count);
>> +    while ( put_count-- )
>> +        put_page_and_type(cpage);
>> +
>>      /* Free the client page */
>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>          put_page(cpage);
>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>      {
>>          if ( !last_gfn )
>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>> -        put_page_and_type(page);
>>          mem_sharing_page_unlock(page);
>> +        put_page_and_type(page);
>>          if ( last_gfn )
>>          {
>>              if ( !get_page(page, dom_cow) )
> 
> ...Probably should have mentioned that this needs to be applied after
> your other patch. :-)

Hmm -- actually, the base appears to be a non-publicly-available tree
(Andy's private x86-next).

I think series should:
1. Always be posted against a publicly-available tree, and
2. If that tree is not xenbits/xen.git staging, the URL and branch
should be provided.

 -George


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:46     ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:46 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/29/19 3:32 PM, George Dunlap wrote:
> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>> Calling _put_page_type while also holding the page_lock
>> for that page can cause a deadlock.
>>
>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>> Cc: Jan Beulich <jbeulich@suse.com>
>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>> Cc: Wei Liu <wei.liu2@citrix.com>
>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>> ---
>> v3: simplified patch by keeping the additional references already in-place
>> ---
>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>
>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>> index dfc279d371..e2f74ac770 100644
>> --- a/xen/arch/x86/mm/mem_sharing.c
>> +++ b/xen/arch/x86/mm/mem_sharing.c
>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>          return -EBUSY;
>>      }
>>  
>> -    /* We can only change the type if count is one */
>> -    /* Because we are locking pages individually, we need to drop
>> -     * the lock here, while the page is typed. We cannot risk the 
>> -     * race of page_unlock and then put_page_type. */
>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>      if ( page->u.inuse.type_info != expected_type )
>>      {
>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>          return -EEXIST;
>>      }
>>  
>> -    /* Drop the final typecount */
>> -    put_page_and_type(page);
>> -
>>      /* Now that we've dropped the type, we can unlock */
>>      mem_sharing_page_unlock(page);
>>  
>> +    /* Drop the final typecount */
>> +    put_page_and_type(page);
>> +
>>      /* Change the owner */
>>      ASSERT(page_get_owner(page) == dom_cow);
>>      page_set_owner(page, d);
>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>      p2m_type_t smfn_type, cmfn_type;
>>      struct two_gfns tg;
>>      struct rmap_iterator ri;
>> +    unsigned long put_count = 0;
>>  
>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>           * Don't change the type of rmap for the client page. */
>>          rmap_del(gfn, cpage, 0);
>>          rmap_add(gfn, spage);
>> -        put_page_and_type(cpage);
>> +        put_count++;
>>          d = get_domain_by_id(gfn->domain);
>>          BUG_ON(!d);
>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>      mem_sharing_page_unlock(secondpg);
>>      mem_sharing_page_unlock(firstpg);
>>  
>> +    BUG_ON(!put_count);
>> +    while ( put_count-- )
>> +        put_page_and_type(cpage);
>> +
>>      /* Free the client page */
>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>          put_page(cpage);
>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>      {
>>          if ( !last_gfn )
>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>> -        put_page_and_type(page);
>>          mem_sharing_page_unlock(page);
>> +        put_page_and_type(page);
>>          if ( last_gfn )
>>          {
>>              if ( !get_page(page, dom_cow) )
> 
> ...Probably should have mentioned that this needs to be applied after
> your other patch. :-)

Hmm -- actually, the base appears to be a non-publicly-available tree
(Andy's private x86-next).

I think series should:
1. Always be posted against a publicly-available tree, and
2. If that tree is not xenbits/xen.git staging, the URL and branch
should be provided.

 -George


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:49       ` Andrew Cooper
  0 siblings, 0 replies; 74+ messages in thread
From: Andrew Cooper @ 2019-04-29 14:49 UTC (permalink / raw)
  To: George Dunlap, Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Wei Liu, Jan Beulich, Roger Pau Monne

On 29/04/2019 15:46, George Dunlap wrote:
> On 4/29/19 3:32 PM, George Dunlap wrote:
>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>>> Calling _put_page_type while also holding the page_lock
>>> for that page can cause a deadlock.
>>>
>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>>> Cc: Jan Beulich <jbeulich@suse.com>
>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>>> Cc: Wei Liu <wei.liu2@citrix.com>
>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>>> ---
>>> v3: simplified patch by keeping the additional references already in-place
>>> ---
>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>>> index dfc279d371..e2f74ac770 100644
>>> --- a/xen/arch/x86/mm/mem_sharing.c
>>> +++ b/xen/arch/x86/mm/mem_sharing.c
>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>          return -EBUSY;
>>>      }
>>>  
>>> -    /* We can only change the type if count is one */
>>> -    /* Because we are locking pages individually, we need to drop
>>> -     * the lock here, while the page is typed. We cannot risk the 
>>> -     * race of page_unlock and then put_page_type. */
>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>>      if ( page->u.inuse.type_info != expected_type )
>>>      {
>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>          return -EEXIST;
>>>      }
>>>  
>>> -    /* Drop the final typecount */
>>> -    put_page_and_type(page);
>>> -
>>>      /* Now that we've dropped the type, we can unlock */
>>>      mem_sharing_page_unlock(page);
>>>  
>>> +    /* Drop the final typecount */
>>> +    put_page_and_type(page);
>>> +
>>>      /* Change the owner */
>>>      ASSERT(page_get_owner(page) == dom_cow);
>>>      page_set_owner(page, d);
>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      p2m_type_t smfn_type, cmfn_type;
>>>      struct two_gfns tg;
>>>      struct rmap_iterator ri;
>>> +    unsigned long put_count = 0;
>>>  
>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>           * Don't change the type of rmap for the client page. */
>>>          rmap_del(gfn, cpage, 0);
>>>          rmap_add(gfn, spage);
>>> -        put_page_and_type(cpage);
>>> +        put_count++;
>>>          d = get_domain_by_id(gfn->domain);
>>>          BUG_ON(!d);
>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      mem_sharing_page_unlock(secondpg);
>>>      mem_sharing_page_unlock(firstpg);
>>>  
>>> +    BUG_ON(!put_count);
>>> +    while ( put_count-- )
>>> +        put_page_and_type(cpage);
>>> +
>>>      /* Free the client page */
>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>          put_page(cpage);
>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>>      {
>>>          if ( !last_gfn )
>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>>> -        put_page_and_type(page);
>>>          mem_sharing_page_unlock(page);
>>> +        put_page_and_type(page);
>>>          if ( last_gfn )
>>>          {
>>>              if ( !get_page(page, dom_cow) )
>> ...Probably should have mentioned that this needs to be applied after
>> your other patch. :-)
> Hmm -- actually, the base appears to be a non-publicly-available tree
> (Andy's private x86-next).

It's perfectly public

http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next

and has been in effect during commit freezes for the past several years
worth of releases.

It is fine to post a tree based on something other than xen.git/staging
so long as the cover letter identifies where the series is based.  A lot
of development already occurs in this way on xen-devel.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:49       ` Andrew Cooper
  0 siblings, 0 replies; 74+ messages in thread
From: Andrew Cooper @ 2019-04-29 14:49 UTC (permalink / raw)
  To: George Dunlap, Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Wei Liu, Jan Beulich, Roger Pau Monne

On 29/04/2019 15:46, George Dunlap wrote:
> On 4/29/19 3:32 PM, George Dunlap wrote:
>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>>> Calling _put_page_type while also holding the page_lock
>>> for that page can cause a deadlock.
>>>
>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>>> Cc: Jan Beulich <jbeulich@suse.com>
>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>>> Cc: Wei Liu <wei.liu2@citrix.com>
>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>>> ---
>>> v3: simplified patch by keeping the additional references already in-place
>>> ---
>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>>> index dfc279d371..e2f74ac770 100644
>>> --- a/xen/arch/x86/mm/mem_sharing.c
>>> +++ b/xen/arch/x86/mm/mem_sharing.c
>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>          return -EBUSY;
>>>      }
>>>  
>>> -    /* We can only change the type if count is one */
>>> -    /* Because we are locking pages individually, we need to drop
>>> -     * the lock here, while the page is typed. We cannot risk the 
>>> -     * race of page_unlock and then put_page_type. */
>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>>      if ( page->u.inuse.type_info != expected_type )
>>>      {
>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>          return -EEXIST;
>>>      }
>>>  
>>> -    /* Drop the final typecount */
>>> -    put_page_and_type(page);
>>> -
>>>      /* Now that we've dropped the type, we can unlock */
>>>      mem_sharing_page_unlock(page);
>>>  
>>> +    /* Drop the final typecount */
>>> +    put_page_and_type(page);
>>> +
>>>      /* Change the owner */
>>>      ASSERT(page_get_owner(page) == dom_cow);
>>>      page_set_owner(page, d);
>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      p2m_type_t smfn_type, cmfn_type;
>>>      struct two_gfns tg;
>>>      struct rmap_iterator ri;
>>> +    unsigned long put_count = 0;
>>>  
>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>           * Don't change the type of rmap for the client page. */
>>>          rmap_del(gfn, cpage, 0);
>>>          rmap_add(gfn, spage);
>>> -        put_page_and_type(cpage);
>>> +        put_count++;
>>>          d = get_domain_by_id(gfn->domain);
>>>          BUG_ON(!d);
>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      mem_sharing_page_unlock(secondpg);
>>>      mem_sharing_page_unlock(firstpg);
>>>  
>>> +    BUG_ON(!put_count);
>>> +    while ( put_count-- )
>>> +        put_page_and_type(cpage);
>>> +
>>>      /* Free the client page */
>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>          put_page(cpage);
>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>>      {
>>>          if ( !last_gfn )
>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>>> -        put_page_and_type(page);
>>>          mem_sharing_page_unlock(page);
>>> +        put_page_and_type(page);
>>>          if ( last_gfn )
>>>          {
>>>              if ( !get_page(page, dom_cow) )
>> ...Probably should have mentioned that this needs to be applied after
>> your other patch. :-)
> Hmm -- actually, the base appears to be a non-publicly-available tree
> (Andy's private x86-next).

It's perfectly public

http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next

and has been in effect during commit freezes for the past several years
worth of releases.

It is fine to post a tree based on something other than xen.git/staging
so long as the cover letter identifies where the series is based.  A lot
of development already occurs in this way on xen-devel.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:54         ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:54 UTC (permalink / raw)
  To: Andrew Cooper, Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/29/19 3:49 PM, Andrew Cooper wrote:
> On 29/04/2019 15:46, George Dunlap wrote:
>> On 4/29/19 3:32 PM, George Dunlap wrote:
>>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>>>> Calling _put_page_type while also holding the page_lock
>>>> for that page can cause a deadlock.
>>>>
>>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>>>> Cc: Jan Beulich <jbeulich@suse.com>
>>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>>>> Cc: Wei Liu <wei.liu2@citrix.com>
>>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>>>> ---
>>>> v3: simplified patch by keeping the additional references already in-place
>>>> ---
>>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>>>> index dfc279d371..e2f74ac770 100644
>>>> --- a/xen/arch/x86/mm/mem_sharing.c
>>>> +++ b/xen/arch/x86/mm/mem_sharing.c
>>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>>          return -EBUSY;
>>>>      }
>>>>  
>>>> -    /* We can only change the type if count is one */
>>>> -    /* Because we are locking pages individually, we need to drop
>>>> -     * the lock here, while the page is typed. We cannot risk the 
>>>> -     * race of page_unlock and then put_page_type. */
>>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>>>      if ( page->u.inuse.type_info != expected_type )
>>>>      {
>>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>>          return -EEXIST;
>>>>      }
>>>>  
>>>> -    /* Drop the final typecount */
>>>> -    put_page_and_type(page);
>>>> -
>>>>      /* Now that we've dropped the type, we can unlock */
>>>>      mem_sharing_page_unlock(page);
>>>>  
>>>> +    /* Drop the final typecount */
>>>> +    put_page_and_type(page);
>>>> +
>>>>      /* Change the owner */
>>>>      ASSERT(page_get_owner(page) == dom_cow);
>>>>      page_set_owner(page, d);
>>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>      p2m_type_t smfn_type, cmfn_type;
>>>>      struct two_gfns tg;
>>>>      struct rmap_iterator ri;
>>>> +    unsigned long put_count = 0;
>>>>  
>>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>           * Don't change the type of rmap for the client page. */
>>>>          rmap_del(gfn, cpage, 0);
>>>>          rmap_add(gfn, spage);
>>>> -        put_page_and_type(cpage);
>>>> +        put_count++;
>>>>          d = get_domain_by_id(gfn->domain);
>>>>          BUG_ON(!d);
>>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>      mem_sharing_page_unlock(secondpg);
>>>>      mem_sharing_page_unlock(firstpg);
>>>>  
>>>> +    BUG_ON(!put_count);
>>>> +    while ( put_count-- )
>>>> +        put_page_and_type(cpage);
>>>> +
>>>>      /* Free the client page */
>>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>>          put_page(cpage);
>>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>>>      {
>>>>          if ( !last_gfn )
>>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>>>> -        put_page_and_type(page);
>>>>          mem_sharing_page_unlock(page);
>>>> +        put_page_and_type(page);
>>>>          if ( last_gfn )
>>>>          {
>>>>              if ( !get_page(page, dom_cow) )
>>> ...Probably should have mentioned that this needs to be applied after
>>> your other patch. :-)
>> Hmm -- actually, the base appears to be a non-publicly-available tree
>> (Andy's private x86-next).
> 
> Its perfectly public
> 
> http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next

OK, didn't realize this.

> 
> and has been in effect during commit freezes for the past several years
> worth of releases.
> 
> It is fine to post a tree based on something other than xen.git/staging
> so long as the cover letter identifies where the series is based.  A lot
> of development already occurs in this way on xen-devel.

I agree it's fine when a URL is provided.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 14:54         ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 14:54 UTC (permalink / raw)
  To: Andrew Cooper, Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/29/19 3:49 PM, Andrew Cooper wrote:
> On 29/04/2019 15:46, George Dunlap wrote:
>> On 4/29/19 3:32 PM, George Dunlap wrote:
>>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
>>>> Calling _put_page_type while also holding the page_lock
>>>> for that page can cause a deadlock.
>>>>
>>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
>>>> Cc: Jan Beulich <jbeulich@suse.com>
>>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>>>> Cc: Wei Liu <wei.liu2@citrix.com>
>>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
>>>> ---
>>>> v3: simplified patch by keeping the additional references already in-place
>>>> ---
>>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>>>>  1 file changed, 10 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>>>> index dfc279d371..e2f74ac770 100644
>>>> --- a/xen/arch/x86/mm/mem_sharing.c
>>>> +++ b/xen/arch/x86/mm/mem_sharing.c
>>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>>          return -EBUSY;
>>>>      }
>>>>  
>>>> -    /* We can only change the type if count is one */
>>>> -    /* Because we are locking pages individually, we need to drop
>>>> -     * the lock here, while the page is typed. We cannot risk the 
>>>> -     * race of page_unlock and then put_page_type. */
>>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
>>>>      if ( page->u.inuse.type_info != expected_type )
>>>>      {
>>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
>>>>          return -EEXIST;
>>>>      }
>>>>  
>>>> -    /* Drop the final typecount */
>>>> -    put_page_and_type(page);
>>>> -
>>>>      /* Now that we've dropped the type, we can unlock */
>>>>      mem_sharing_page_unlock(page);
>>>>  
>>>> +    /* Drop the final typecount */
>>>> +    put_page_and_type(page);
>>>> +
>>>>      /* Change the owner */
>>>>      ASSERT(page_get_owner(page) == dom_cow);
>>>>      page_set_owner(page, d);
>>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>      p2m_type_t smfn_type, cmfn_type;
>>>>      struct two_gfns tg;
>>>>      struct rmap_iterator ri;
>>>> +    unsigned long put_count = 0;
>>>>  
>>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
>>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>           * Don't change the type of rmap for the client page. */
>>>>          rmap_del(gfn, cpage, 0);
>>>>          rmap_add(gfn, spage);
>>>> -        put_page_and_type(cpage);
>>>> +        put_count++;
>>>>          d = get_domain_by_id(gfn->domain);
>>>>          BUG_ON(!d);
>>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
>>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>>      mem_sharing_page_unlock(secondpg);
>>>>      mem_sharing_page_unlock(firstpg);
>>>>  
>>>> +    BUG_ON(!put_count);
>>>> +    while ( put_count-- )
>>>> +        put_page_and_type(cpage);
>>>> +
>>>>      /* Free the client page */
>>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>>          put_page(cpage);
>>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
>>>>      {
>>>>          if ( !last_gfn )
>>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
>>>> -        put_page_and_type(page);
>>>>          mem_sharing_page_unlock(page);
>>>> +        put_page_and_type(page);
>>>>          if ( last_gfn )
>>>>          {
>>>>              if ( !get_page(page, dom_cow) )
>>> ...Probably should have mentioned that this needs to be applied after
>>> your other patch. :-)
>> Hmm -- actually, the base appears to be a non-publicly-available tree
>> (Andy's private x86-next).
> 
> Its perfectly public
> 
> http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next

OK, didn't realize this.

> 
> and has been in effect during commit freezes for the past several years
> worth of releases.
> 
> It is fine to post a tree based on something other than xen.git/staging
> so long as the cover letter identifies where the series is based.  A lot
> of development already occurs in this way on xen-devel.

I agree it's fine when a URL is provided.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-29 14:57     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 14:57 UTC (permalink / raw)
  To: Tamas K Lengyel; +Cc: Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> --- a/xen/include/asm-x86/mem_sharing.h
> +++ b/xen/include/asm-x86/mem_sharing.h
> @@ -25,7 +25,9 @@
>  #include <public/memory.h>
>  
>  /* Auditing of memory sharing code? */
> +#ifndef NDEBUG
>  #define MEM_SHARING_AUDIT 1
> +#endif

Since consumers use #if (not #ifdef), I think an #else would
be in order here, even if (I think) not strictly necessary with
gcc.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-29 14:57     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 14:57 UTC (permalink / raw)
  To: Tamas K Lengyel; +Cc: Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> --- a/xen/include/asm-x86/mem_sharing.h
> +++ b/xen/include/asm-x86/mem_sharing.h
> @@ -25,7 +25,9 @@
>  #include <public/memory.h>
>  
>  /* Auditing of memory sharing code? */
> +#ifndef NDEBUG
>  #define MEM_SHARING_AUDIT 1
> +#endif

Since consumers use #if (not #ifdef), I think an #else would
be in order here, even if (I think) not strictly necessary with
gcc.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:01   ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:01 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      mem_sharing_page_unlock(secondpg);
>      mem_sharing_page_unlock(firstpg);
>  
> +    BUG_ON(!put_count);
> +    while ( put_count-- )
> +        put_page_and_type(cpage);
> +
>      /* Free the client page */
>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>          put_page(cpage);

If this was to happen before all the put_page_and_type() calls,
wouldn't it render unnecessary the extra get_page()/put_page()
around this code region?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:01   ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:01 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>      mem_sharing_page_unlock(secondpg);
>      mem_sharing_page_unlock(firstpg);
>  
> +    BUG_ON(!put_count);
> +    while ( put_count-- )
> +        put_page_and_type(cpage);
> +
>      /* Free the client page */
>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>          put_page(cpage);

If this was to happen before all the put_page_and_type() calls,
wouldn't it render unnecessary the extra get_page()/put_page()
around this code region?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 15:18     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:18 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
> ordering" added extra sanity checking to page_lock/page_unlock for debug builds
> with the assumption that no hypervisor path ever locks two pages at once.
> 
> This assumption doesn't hold during memory sharing so we introduce separate
> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
> in the memory sharing subsystem.

Completely bypassing these checks looks undesirable to me, to
be honest. Otoh as discussed mem-sharing is abusing the lock
anyway. So if this is to remain that way, I wonder whether the
better course of action wouldn't be to stop abusing the
functions, cloning them to continue using the PGT_locked flag.
I would then wonder whether e.g. the checking of PGT_validated
would actually be needed in the cloned functions - that's again
a flag which ought to have meaning mainly (exclusively?) for
handling of PV guests.

> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
>  #define current_locked_page_ne_check(x) true
>  #endif
>  
> -int page_lock(struct page_info *page)
> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> +static int _page_lock(struct page_info *page)

As per above, personally I'm against introducing
page_{,un}lock_memshr(), as that makes the abuse even more
look like proper use. But if this was to be kept this way, may I
ask that you switch int -> bool in the return types at this occasion?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 15:18     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:18 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
> ordering" added extra sanity checking to page_lock/page_unlock for debug builds
> with the assumption that no hypervisor path ever locks two pages at once.
> 
> This assumption doesn't hold during memory sharing so we introduce separate
> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
> in the memory sharing subsystem.

Completely bypassing these checks looks undesirable to me, to
be honest. Otoh as discussed mem-sharing is abusing the lock
anyway. So if this is to remain that way, I wonder whether the
better course of action wouldn't be to stop abusing the
functions, cloning them to continue using the PGT_locked flag.
I would then wonder whether e.g. the checking of PGT_validated
would actually be needed in the cloned functions - that's again
a flag which ought to have meaning mainly (exclusively?) for
handling of PV guests.

> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
>  #define current_locked_page_ne_check(x) true
>  #endif
>  
> -int page_lock(struct page_info *page)
> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> +static int _page_lock(struct page_info *page)

As per above, personally I'm against introducing
page_{,un}lock_memshr(), as that makes the abuse even more
look like proper use. But if this was to be kept this way, may I
ask that you switch int -> bool in the return types at this occasion?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-29 15:31       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:31 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 8:57 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > --- a/xen/include/asm-x86/mem_sharing.h
> > +++ b/xen/include/asm-x86/mem_sharing.h
> > @@ -25,7 +25,9 @@
> >  #include <public/memory.h>
> >
> >  /* Auditing of memory sharing code? */
> > +#ifndef NDEBUG
> >  #define MEM_SHARING_AUDIT 1
> > +#endif
>
> Since consumers use #if (not #ifdef), I think an #else would
> be on order here, even if (I think) not strictly necessary with
> gcc.

Sure.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds
@ 2019-04-29 15:31       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:31 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 8:57 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > --- a/xen/include/asm-x86/mem_sharing.h
> > +++ b/xen/include/asm-x86/mem_sharing.h
> > @@ -25,7 +25,9 @@
> >  #include <public/memory.h>
> >
> >  /* Auditing of memory sharing code? */
> > +#ifndef NDEBUG
> >  #define MEM_SHARING_AUDIT 1
> > +#endif
>
> Since consumers use #if (not #ifdef), I think an #else would
> be on order here, even if (I think) not strictly necessary with
> gcc.

Sure.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-29 15:32     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:32 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> --- a/xen/arch/x86/Kconfig
> +++ b/xen/arch/x86/Kconfig
> @@ -17,7 +17,6 @@ config X86
>  	select HAS_KEXEC
>  	select MEM_ACCESS_ALWAYS_ON
>  	select HAS_MEM_PAGING
> -	select HAS_MEM_SHARING
>  	select HAS_NS16550
>  	select HAS_PASSTHROUGH
>  	select HAS_PCI
> @@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
>  	  firmware, and will not function correctly in other scenarios.
>  
>  	  If unsure, say N.
> +
> +config MEM_SHARING
> +    bool
> +    default n
> +    depends on HVM
> +    prompt "Xen memory sharing support" if EXPERT = "y"
> +

As per all the context above, please use proper (tab) indentation.
Also please omit the pointless "default n". And then the "bool"
and "prompt ..." can (and hence should) be combined into a
single line.

I also wonder whether this shouldn't be in common/Kconfig, but
then again it can be moved there if Arm would ever gain
mem-sharing support.

> @@ -98,4 +100,33 @@ void mem_sharing_init(void);
>   */
>  int relinquish_shared_pages(struct domain *d);
>  
> +#else
> +
> +static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
> +{
> +    return 0;
> +}
> +static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
> +{
> +    return 0;
> +}

I follow you for these.

> +static inline int mem_sharing_unshare_page(struct domain *d,
> +                                           unsigned long gfn,
> +                                           uint16_t flags)
> +{
> +    return 0;
> +}

But shouldn't this one (just as an example, there may be more
below) return -EOPNOTSUPP?

And in general - if these inline functions are needed, shouldn't
all of them (or at least some) gain ASSERT_UNREACHABLE(), as we
do elsewhere?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-29 15:32     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 15:32 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, xen-devel, Roger Pau Monne

>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> --- a/xen/arch/x86/Kconfig
> +++ b/xen/arch/x86/Kconfig
> @@ -17,7 +17,6 @@ config X86
>  	select HAS_KEXEC
>  	select MEM_ACCESS_ALWAYS_ON
>  	select HAS_MEM_PAGING
> -	select HAS_MEM_SHARING
>  	select HAS_NS16550
>  	select HAS_PASSTHROUGH
>  	select HAS_PCI
> @@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
>  	  firmware, and will not function correctly in other scenarios.
>  
>  	  If unsure, say N.
> +
> +config MEM_SHARING
> +    bool
> +    default n
> +    depends on HVM
> +    prompt "Xen memory sharing support" if EXPERT = "y"
> +

As per all the context above, please use proper (tab) indentation.
Also please omit the pointless "default n". And then the "bool"
and "prompt ..." can (and hence should) be combined into a
single line.

I also wonder whether this shouldn't be in common/Kconfig, but
then again it can be moved there if Arm would ever gain
mem-sharing support.

> @@ -98,4 +100,33 @@ void mem_sharing_init(void);
>   */
>  int relinquish_shared_pages(struct domain *d);
>  
> +#else
> +
> +static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
> +{
> +    return 0;
> +}
> +static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
> +{
> +    return 0;
> +}

I follow you for these.

> +static inline int mem_sharing_unshare_page(struct domain *d,
> +                                           unsigned long gfn,
> +                                           uint16_t flags)
> +{
> +    return 0;
> +}

But shouldn't this one (just as an example, there may be more
below) return -EOPNOTSUPP?

And in general - if these inline functions are needed, shouldn't
all of them (or at least some) gain ASSERT_UNREACHABLE(), as we
do elsewhere?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:37           ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:37 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, Xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 8:54 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 3:49 PM, Andrew Cooper wrote:
> > On 29/04/2019 15:46, George Dunlap wrote:
> >> On 4/29/19 3:32 PM, George Dunlap wrote:
> >>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> >>>> Calling _put_page_type while also holding the page_lock
> >>>> for that page can cause a deadlock.
> >>>>
> >>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> >>>> Cc: Jan Beulich <jbeulich@suse.com>
> >>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> >>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> >>>> Cc: Wei Liu <wei.liu2@citrix.com>
> >>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
> >>>> ---
> >>>> v3: simplified patch by keeping the additional references already in-place
> >>>> ---
> >>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
> >>>>  1 file changed, 10 insertions(+), 9 deletions(-)
> >>>>
> >>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> >>>> index dfc279d371..e2f74ac770 100644
> >>>> --- a/xen/arch/x86/mm/mem_sharing.c
> >>>> +++ b/xen/arch/x86/mm/mem_sharing.c
> >>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >>>>          return -EBUSY;
> >>>>      }
> >>>>
> >>>> -    /* We can only change the type if count is one */
> >>>> -    /* Because we are locking pages individually, we need to drop
> >>>> -     * the lock here, while the page is typed. We cannot risk the
> >>>> -     * race of page_unlock and then put_page_type. */
> >>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
> >>>>      if ( page->u.inuse.type_info != expected_type )
> >>>>      {
> >>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >>>>          return -EEXIST;
> >>>>      }
> >>>>
> >>>> -    /* Drop the final typecount */
> >>>> -    put_page_and_type(page);
> >>>> -
> >>>>      /* Now that we've dropped the type, we can unlock */
> >>>>      mem_sharing_page_unlock(page);
> >>>>
> >>>> +    /* Drop the final typecount */
> >>>> +    put_page_and_type(page);
> >>>> +
> >>>>      /* Change the owner */
> >>>>      ASSERT(page_get_owner(page) == dom_cow);
> >>>>      page_set_owner(page, d);
> >>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>      p2m_type_t smfn_type, cmfn_type;
> >>>>      struct two_gfns tg;
> >>>>      struct rmap_iterator ri;
> >>>> +    unsigned long put_count = 0;
> >>>>
> >>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
> >>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
> >>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>           * Don't change the type of rmap for the client page. */
> >>>>          rmap_del(gfn, cpage, 0);
> >>>>          rmap_add(gfn, spage);
> >>>> -        put_page_and_type(cpage);
> >>>> +        put_count++;
> >>>>          d = get_domain_by_id(gfn->domain);
> >>>>          BUG_ON(!d);
> >>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
> >>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>      mem_sharing_page_unlock(secondpg);
> >>>>      mem_sharing_page_unlock(firstpg);
> >>>>
> >>>> +    BUG_ON(!put_count);
> >>>> +    while ( put_count-- )
> >>>> +        put_page_and_type(cpage);
> >>>> +
> >>>>      /* Free the client page */
> >>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >>>>          put_page(cpage);
> >>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
> >>>>      {
> >>>>          if ( !last_gfn )
> >>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
> >>>> -        put_page_and_type(page);
> >>>>          mem_sharing_page_unlock(page);
> >>>> +        put_page_and_type(page);
> >>>>          if ( last_gfn )
> >>>>          {
> >>>>              if ( !get_page(page, dom_cow) )
> >>> ...Probably should have mentioned that this needs to be applied after
> >>> your other patch. :-)
> >> Hmm -- actually, the base appears to be a non-publicly-available tree
> >> (Andy's private x86-next).
> >
> > Its perfectly public
> >
> > http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next
>
> OK, didn't realize this.
>
> >
> > and has been in effect during commit freezes for the past several years
> > worth of releases.
> >
> > It is fine to post a tree based on something other than xen.git/staging
> > so long as the cover letter identifies where the series is based.  A lot
> > of development already occurs in this way on xen-devel.
>
> I agree it's fine when a URL is provided.
>

Yeap, sorry, should have pointed out I rebased on x86-next so I don't
keep sending patches that are already pulled in.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:37           ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:37 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, Xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 8:54 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 3:49 PM, Andrew Cooper wrote:
> > On 29/04/2019 15:46, George Dunlap wrote:
> >> On 4/29/19 3:32 PM, George Dunlap wrote:
> >>> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> >>>> Calling _put_page_type while also holding the page_lock
> >>>> for that page can cause a deadlock.
> >>>>
> >>>> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> >>>> Cc: Jan Beulich <jbeulich@suse.com>
> >>>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> >>>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> >>>> Cc: Wei Liu <wei.liu2@citrix.com>
> >>>> Cc: Roger Pau Monne <roger.pau@citrix.com>
> >>>> ---
> >>>> v3: simplified patch by keeping the additional references already in-place
> >>>> ---
> >>>>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
> >>>>  1 file changed, 10 insertions(+), 9 deletions(-)
> >>>>
> >>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> >>>> index dfc279d371..e2f74ac770 100644
> >>>> --- a/xen/arch/x86/mm/mem_sharing.c
> >>>> +++ b/xen/arch/x86/mm/mem_sharing.c
> >>>> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >>>>          return -EBUSY;
> >>>>      }
> >>>>
> >>>> -    /* We can only change the type if count is one */
> >>>> -    /* Because we are locking pages individually, we need to drop
> >>>> -     * the lock here, while the page is typed. We cannot risk the
> >>>> -     * race of page_unlock and then put_page_type. */
> >>>>      expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
> >>>>      if ( page->u.inuse.type_info != expected_type )
> >>>>      {
> >>>> @@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >>>>          return -EEXIST;
> >>>>      }
> >>>>
> >>>> -    /* Drop the final typecount */
> >>>> -    put_page_and_type(page);
> >>>> -
> >>>>      /* Now that we've dropped the type, we can unlock */
> >>>>      mem_sharing_page_unlock(page);
> >>>>
> >>>> +    /* Drop the final typecount */
> >>>> +    put_page_and_type(page);
> >>>> +
> >>>>      /* Change the owner */
> >>>>      ASSERT(page_get_owner(page) == dom_cow);
> >>>>      page_set_owner(page, d);
> >>>> @@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>      p2m_type_t smfn_type, cmfn_type;
> >>>>      struct two_gfns tg;
> >>>>      struct rmap_iterator ri;
> >>>> +    unsigned long put_count = 0;
> >>>>
> >>>>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
> >>>>                   cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
> >>>> @@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>           * Don't change the type of rmap for the client page. */
> >>>>          rmap_del(gfn, cpage, 0);
> >>>>          rmap_add(gfn, spage);
> >>>> -        put_page_and_type(cpage);
> >>>> +        put_count++;
> >>>>          d = get_domain_by_id(gfn->domain);
> >>>>          BUG_ON(!d);
> >>>>          BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
> >>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>>      mem_sharing_page_unlock(secondpg);
> >>>>      mem_sharing_page_unlock(firstpg);
> >>>>
> >>>> +    BUG_ON(!put_count);
> >>>> +    while ( put_count-- )
> >>>> +        put_page_and_type(cpage);
> >>>> +
> >>>>      /* Free the client page */
> >>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >>>>          put_page(cpage);
> >>>> @@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
> >>>>      {
> >>>>          if ( !last_gfn )
> >>>>              mem_sharing_gfn_destroy(page, d, gfn_info);
> >>>> -        put_page_and_type(page);
> >>>>          mem_sharing_page_unlock(page);
> >>>> +        put_page_and_type(page);
> >>>>          if ( last_gfn )
> >>>>          {
> >>>>              if ( !get_page(page, dom_cow) )
> >>> ...Probably should have mentioned that this needs to be applied after
> >>> your other patch. :-)
> >> Hmm -- actually, the base appears to be a non-publicly-available tree
> >> (Andy's private x86-next).
> >
> > Its perfectly public
> >
> > http://xenbits.xen.org/gitweb/?p=people/andrewcoop/xen.git;a=shortlog;h=refs/heads/x86-next
>
> OK, didn't realize this.
>
> >
> > and has been in effect during commit freezes for the past several years
> > worth of releases.
> >
> > It is fine to post a tree based on something other than xen.git/staging
> > so long as the cover letter identifies where the series is based.  A lot
> > of development already occurs in this way on xen-devel.
>
> I agree it's fine when a URL is provided.
>

Yeap, sorry, should have pointed out I rebased on x86-next so I don't
keep sending patches that are already pulled in.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:41     ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:41 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >      mem_sharing_page_unlock(secondpg);
> >      mem_sharing_page_unlock(firstpg);
> >
> > +    BUG_ON(!put_count);
> > +    while ( put_count-- )
> > +        put_page_and_type(cpage);
> > +
> >      /* Free the client page */
> >      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >          put_page(cpage);
>
> If this was to happen before all the put_page_and_type() calls,
> wouldn't it render unnecessary the extra get_page()/put_page()
> around this code region?

It would - I already sent a version of the patch in that form but
there was unease expressed by George going that route because of the
complexity of the code and in his view it's the safe bet to keep the
extra references. I think the overhead of grabbing the extra
references is negligible enough that I'm not going to argue over it.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:41     ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:41 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >      mem_sharing_page_unlock(secondpg);
> >      mem_sharing_page_unlock(firstpg);
> >
> > +    BUG_ON(!put_count);
> > +    while ( put_count-- )
> > +        put_page_and_type(cpage);
> > +
> >      /* Free the client page */
> >      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >          put_page(cpage);
>
> If this was to happen before all the put_page_and_type() calls,
> wouldn't it render unnecessary the extra get_page()/put_page()
> around this code region?

It would - I already sent a version of the patch in that form but
there was unease expressed by George going that route because of the
complexity of the code and in his view it's the safe bet to keep the
extra references. I think the overhead of grabbing the extra
references is negligible enough that I'm not going to argue over it.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:44   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 15:44 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> Calling _put_page_type while also holding the page_lock
> for that page can cause a deadlock.

I can't immediately see what the mem_sharing_page_[un]lock() functions
are meant to be protecting against.  Is there a comment anywhere
describing what they're used for or how they work?

Because...

> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Roger Pau Monne <roger.pau@citrix.com>
> ---
> v3: simplified patch by keeping the additional references already in-place
> ---
>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index dfc279d371..e2f74ac770 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EBUSY;
>      }
>  
> -    /* We can only change the type if count is one */
> -    /* Because we are locking pages individually, we need to drop
> -     * the lock here, while the page is typed. We cannot risk the 
> -     * race of page_unlock and then put_page_type. */

...the comment you're removing explicitly says that what you're doing is
"risk"-y, but you don't explain at all why the comment is wrong.

Ultimately you're the maintainer, and this is non-security-supported, so
if you insist it's safe, I won't oppose it; but it seems like having
some clarity about what is or is not risky and why would be helpful.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:44   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 15:44 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monne

On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> Calling _put_page_type while also holding the page_lock
> for that page can cause a deadlock.

I can't immediately see what the mem_sharing_page_[un]lock() functions
are meant to be protecting against.  Is there a comment anywhere
describing what they're used for or how they work?

Because...

> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: Roger Pau Monne <roger.pau@citrix.com>
> ---
> v3: simplified patch by keeping the additional references already in-place
> ---
>  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index dfc279d371..e2f74ac770 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
>          return -EBUSY;
>      }
>  
> -    /* We can only change the type if count is one */
> -    /* Because we are locking pages individually, we need to drop
> -     * the lock here, while the page is typed. We cannot risk the 
> -     * race of page_unlock and then put_page_type. */

...the comment you're removing explicitly says that what you're doing is
"risk"-y, but you don't explain at all why the comment is wrong.

Ultimately you're the maintainer, and this is non-security-supported, so
if you insist it's safe, I won't oppose it; but it seems like having
some clarity about what is or is not risky and why would be helpful.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-29 15:49       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:49 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:32 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > --- a/xen/arch/x86/Kconfig
> > +++ b/xen/arch/x86/Kconfig
> > @@ -17,7 +17,6 @@ config X86
> >       select HAS_KEXEC
> >       select MEM_ACCESS_ALWAYS_ON
> >       select HAS_MEM_PAGING
> > -     select HAS_MEM_SHARING
> >       select HAS_NS16550
> >       select HAS_PASSTHROUGH
> >       select HAS_PCI
> > @@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
> >         firmware, and will not function correctly in other scenarios.
> >
> >         If unsure, say N.
> > +
> > +config MEM_SHARING
> > +    bool
> > +    default n
> > +    depends on HVM
> > +    prompt "Xen memory sharing support" if EXPERT = "y"
> > +
>
> As per all the context above, please use proper (tab) indentation.
> Also please omit the pointless "default n". And then the "bool"
> and "prompt ..." can (and hence should) be combined into a
> single line.

Sure.

>
> I also wonder whether this shouldn't be in common/Kconfig, but
> then again it can be moved there if Arm would ever gain
> mem-sharing support.

It is currently only supported for x86 HVM guests. There are no plans
for adding ARM support. If and when that happens, it could be moved to
common. I don't expect that will ever happen.

>
> > @@ -98,4 +100,33 @@ void mem_sharing_init(void);
> >   */
> >  int relinquish_shared_pages(struct domain *d);
> >
> > +#else
> > +
> > +static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
> > +{
> > +    return 0;
> > +}
> > +static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
> > +{
> > +    return 0;
> > +}
>
> I follow you for these.
>
> > +static inline int mem_sharing_unshare_page(struct domain *d,
> > +                                           unsigned long gfn,
> > +                                           uint16_t flags)
> > +{
> > +    return 0;
> > +}
>
> But shouldn't this one (just as an example, there may be more
> below) return -EOPNOTSUPP?

It really doesn't matter. No caller ever reaches this function when
mem_sharing is compiled out since it's gated on the page type being
p2m_ram_shared. You can't set that page type when the memop/domctl to
set it is gone.

>
> And in general - if these inline functions are needed, shouldn't
> all of them (or at least some) gain ASSERT_UNREACHABLE(), as we
> do elsewhere?

Yes, since unshare_page is unreachable adding that assert would be
appropriate. It would probably be appropriate for the others too
(except get_nr_saved/shared_mfns).

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled
@ 2019-04-29 15:49       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:49 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:32 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > --- a/xen/arch/x86/Kconfig
> > +++ b/xen/arch/x86/Kconfig
> > @@ -17,7 +17,6 @@ config X86
> >       select HAS_KEXEC
> >       select MEM_ACCESS_ALWAYS_ON
> >       select HAS_MEM_PAGING
> > -     select HAS_MEM_SHARING
> >       select HAS_NS16550
> >       select HAS_PASSTHROUGH
> >       select HAS_PCI
> > @@ -198,6 +197,13 @@ config PV_SHIM_EXCLUSIVE
> >         firmware, and will not function correctly in other scenarios.
> >
> >         If unsure, say N.
> > +
> > +config MEM_SHARING
> > +    bool
> > +    default n
> > +    depends on HVM
> > +    prompt "Xen memory sharing support" if EXPERT = "y"
> > +
>
> As per all the context above, please use proper (tab) indentation.
> Also please omit the pointless "default n". And then the "bool"
> and "prompt ..." can (and hence should) be combined into a
> single line.

Sure.

>
> I also wonder whether this shouldn't be in common/Kconfig, but
> then again it can be moved there if Arm would ever gain
> mem-sharing support.

It is currently only supported for x86 HVM guests. There are no plans
for adding ARM support. If and when that happens, it could be moved to
common. I don't expect that will ever happen.

>
> > @@ -98,4 +100,33 @@ void mem_sharing_init(void);
> >   */
> >  int relinquish_shared_pages(struct domain *d);
> >
> > +#else
> > +
> > +static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
> > +{
> > +    return 0;
> > +}
> > +static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
> > +{
> > +    return 0;
> > +}
>
> I follow you for these.
>
> > +static inline int mem_sharing_unshare_page(struct domain *d,
> > +                                           unsigned long gfn,
> > +                                           uint16_t flags)
> > +{
> > +    return 0;
> > +}
>
> But shouldn't this one (just as an example, there may be more
> below) return -EOPNOTSUPP?

It really doesn't matter. No caller ever reaches this function when
mem_sharing is compiled out since it's gated on the page type being
p2m_ram_shared. You can't set that page type when the memop/domctl to
set it is gone.

>
> And in general - if these inline functions are needed, shouldn't
> all of them (or at least some) gain ASSERT_UNREACHABLE(), as we
> do elsewhere?

Yes, since unshare_page is unreachable adding that assert would be
appropriate. It would probably be appropriate for the others too
(except get_nr_saved/shared_mfns).

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:52       ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 15:52 UTC (permalink / raw)
  To: Tamas K Lengyel, Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 4:41 PM, Tamas K Lengyel wrote:
> On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      mem_sharing_page_unlock(secondpg);
>>>      mem_sharing_page_unlock(firstpg);
>>>
>>> +    BUG_ON(!put_count);
>>> +    while ( put_count-- )
>>> +        put_page_and_type(cpage);
>>> +
>>>      /* Free the client page */
>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>          put_page(cpage);
>>
>> If this was to happen before all the put_page_and_type() calls,
>> wouldn't it render unnecessary the extra get_page()/put_page()
>> around this code region?
> 
> It would - I already sent a version of the patch in that form but
> there was unease expressed by George going that route because of the
> complexity of the code and in his view it's the safe bet to keep the
> extra references. I think the overhead of grabbing the extra
> references is negligible enough that I'm not going to argue over it.

Er, that's not what I said. :-)

I gave four possible options, *one* of which was to change the ASSERT()
to a BUG_ON(), and *one* of which was to keep the (probably redundant)
get_page() and put_page() calls.  You appear to have done both. :-)

I haven't re-grokked the code here, but assuming I was correct 2 weeks
ago, if you have the BUG_ON() there, you can get rid of the extra
references.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:52       ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 15:52 UTC (permalink / raw)
  To: Tamas K Lengyel, Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 4:41 PM, Tamas K Lengyel wrote:
> On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
>>>      mem_sharing_page_unlock(secondpg);
>>>      mem_sharing_page_unlock(firstpg);
>>>
>>> +    BUG_ON(!put_count);
>>> +    while ( put_count-- )
>>> +        put_page_and_type(cpage);
>>> +
>>>      /* Free the client page */
>>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
>>>          put_page(cpage);
>>
>> If this was to happen before all the put_page_and_type() calls,
>> wouldn't it render unnecessary the extra get_page()/put_page()
>> around this code region?
> 
> It would - I already sent a version of the patch in that form but
> there was unease expressed by George going that route because of the
> complexity of the code and in his view it's the safe bet to keep the
> extra references. I think the overhead of grabbing the extra
> references is negligible enough that I'm not going to argue over it.

Er, that's not what I said. :-)

I gave four possible options, *one* of which was to change the ASSERT()
to a BUG_ON(), and *one* of which was to keep the (probably redundant)
get_page() and put_page() calls.  You appear to have done both. :-)

I haven't re-grokked the code here, but assuming I was correct 2 weeks
ago, if you have the BUG_ON() there, you can get rid of the extra
references.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:58     ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:58 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, Xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 9:44 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> > Calling _put_page_type while also holding the page_lock
> > for that page can cause a deadlock.
>
> I can't immediately see what the mem_sharing_page_[un]lock() functions
> are meant to be protecting against.  Is there a comment anywhere
> describing what they're used for or how they work?

There are none. The whole subsystem and its use of page_lock/unlock is
undocumented. The lock is used to protect the page_sharing_info
structure from simultaneous updates (when the page is shared/unshared)
and also to make freeing that structure safe.

>
> Because...
>
> > Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> > Cc: Jan Beulich <jbeulich@suse.com>
> > Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> > Cc: George Dunlap <george.dunlap@eu.citrix.com>
> > Cc: Wei Liu <wei.liu2@citrix.com>
> > Cc: Roger Pau Monne <roger.pau@citrix.com>
> > ---
> > v3: simplified patch by keeping the additional references already in-place
> > ---
> >  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
> >  1 file changed, 10 insertions(+), 9 deletions(-)
> >
> > diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> > index dfc279d371..e2f74ac770 100644
> > --- a/xen/arch/x86/mm/mem_sharing.c
> > +++ b/xen/arch/x86/mm/mem_sharing.c
> > @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >          return -EBUSY;
> >      }
> >
> > -    /* We can only change the type if count is one */
> > -    /* Because we are locking pages individually, we need to drop
> > -     * the lock here, while the page is typed. We cannot risk the
> > -     * race of page_unlock and then put_page_type. */
>
> ...the comment you're removing explicitly says that what you're doing is
> "risk"-y.; but you don't explain at all why the comment is wrong.

AFAICT that comment was correct when it was added but since then it is
out-of-date. There is now an explicit switch case added to
_put_page_type that seems to handle this situation (case PGT_locked |
1) that was not there when this comment was introduced. That's what I
was able to decipher using git archeology. IMHO the whole
page_lock/put_type and reference counting is undecipherable (at least
I'm no closer to understanding it after staring at this for 2 weeks
now) and I wish there was a way to just get rid of the whole thing
but unfortunately there doesn't seem to be one due to not being able
to easily grow page_info.

>
> Ultimately you're the maintainer, and this is non-security-supported, so
> if you insist it's safe, I won't oppose it; but it seems like having
> some clarity about what is or is not risky and why would be helpful.

I'm just trying to revive the subsystem as the way things are right
now it's broken. Doing this reorder makes the deadlock go away.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 15:58     ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 15:58 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, Xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 9:44 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/26/19 6:21 PM, Tamas K Lengyel wrote:
> > Calling _put_page_type while also holding the page_lock
> > for that page can cause a deadlock.
>
> I can't immediately see what the mem_sharing_page_[un]lock() functions
> are meant to be protecting against.  Is there a comment anywhere
> describing what they're used for or how they work?

There are none. The whole subsystem and its use of page_lock/unlock is
undocumented. The lock is used to protect the page_sharing_info
structure from simultaneous updates (when the page is shared/unshared)
and also to make freeing that structure safe.

>
> Because...
>
> > Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> > Cc: Jan Beulich <jbeulich@suse.com>
> > Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> > Cc: George Dunlap <george.dunlap@eu.citrix.com>
> > Cc: Wei Liu <wei.liu2@citrix.com>
> > Cc: Roger Pau Monne <roger.pau@citrix.com>
> > ---
> > v3: simplified patch by keeping the additional references already in-place
> > ---
> >  xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
> >  1 file changed, 10 insertions(+), 9 deletions(-)
> >
> > diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> > index dfc279d371..e2f74ac770 100644
> > --- a/xen/arch/x86/mm/mem_sharing.c
> > +++ b/xen/arch/x86/mm/mem_sharing.c
> > @@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
> >          return -EBUSY;
> >      }
> >
> > -    /* We can only change the type if count is one */
> > -    /* Because we are locking pages individually, we need to drop
> > -     * the lock here, while the page is typed. We cannot risk the
> > -     * race of page_unlock and then put_page_type. */
>
> ...the comment you're removing explicitly says that what you're doing is
> "risk"-y.; but you don't explain at all why the comment is wrong.

AFAICT that comment was correct when it was added but since then it is
out-of-date. There is now an explicit switch case added to
_put_page_type that seems to handle this situation (case PGT_locked |
1) that was not there when this comment was introduced. That's what I
was able to decipher using git archeology. IMHO the whole
page_lock/put_type and reference counting is undecipherable (at least
I'm no closer to understanding it after staring at this for 2 weeks
now) and I wish there was a way to just get rid of the whole thing
but unfortunately there doesn't seem to be one due to not being able
to easily grow page_info.

>
> Ultimately you're the maintainer, and this is non-security-supported, so
> if you insist it's safe, I won't oppose it; but it seems like having
> some clarity about what is or is not risky and why would be helpful.

I'm just trying to revive the subsystem as the way things are right
now it's broken. Doing this reorder makes the deadlock go away.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:05         ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:05 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 4:41 PM, Tamas K Lengyel wrote:
> > On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>      mem_sharing_page_unlock(secondpg);
> >>>      mem_sharing_page_unlock(firstpg);
> >>>
> >>> +    BUG_ON(!put_count);
> >>> +    while ( put_count-- )
> >>> +        put_page_and_type(cpage);
> >>> +
> >>>      /* Free the client page */
> >>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >>>          put_page(cpage);
> >>
> >> If this was to happen before all the put_page_and_type() calls,
> >> wouldn't it render unnecessary the extra get_page()/put_page()
> >> around this code region?
> >
> > It would - I already sent a version of the patch in that form but
> > there was unease expressed by George going that route because of the
> > complexity of the code and in his view it's the safe bet to keep the
> > extra references. I think the overhead of grabbing the extra
> > references is negligible enough that I'm not going to argue over it.
>
> Er, that's not what I said. :-)
>
> I gave four possible options, *one* of which was to change the ASSERT()
> to a BUG_ON(), and *one* of which was to keep the (probably redundant)
> get_page() and put_page() calls.  You appear to have done both. :-)

Ah indeed - I've combined those recommendations :)

>
> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> ago, if you have the BUG_ON() there, you can get rid of the extra
> references.
>

Sure, but again, the overhead of having them in-place is negligible so
might as well just keep it.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:05         ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:05 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 4:41 PM, Tamas K Lengyel wrote:
> > On Mon, Apr 29, 2019 at 9:01 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >>> @@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
> >>>      mem_sharing_page_unlock(secondpg);
> >>>      mem_sharing_page_unlock(firstpg);
> >>>
> >>> +    BUG_ON(!put_count);
> >>> +    while ( put_count-- )
> >>> +        put_page_and_type(cpage);
> >>> +
> >>>      /* Free the client page */
> >>>      if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
> >>>          put_page(cpage);
> >>
> >> If this was to happen before all the put_page_and_type() calls,
> >> wouldn't it render unnecessary the extra get_page()/put_page()
> >> around this code region?
> >
> > It would - I already sent a version of the patch in that form but
> > there was unease expressed by George going that route because of the
> > complexity of the code and in his view it's the safe bet to keep the
> > extra references. I think the overhead of grabbing the extra
> > references is negligible enough that I'm not going to argue over it.
>
> Er, that's not what I said. :-)
>
> I gave four possible options, *one* of which was to change the ASSERT()
> to a BUG_ON(), and *one* of which was to keep the (probably redundant)
> get_page() and put_page() calls.  You appear to have done both. :-)

Ah indeed - I've combined those recommendations :)

>
> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> ago, if you have the BUG_ON() there, you can get rid of the extra
> references.
>

Sure, but again, the overhead of having them in-place is negligible so
might as well just keep it.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:14           ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 16:14 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Wei Liu, George Dunlap, Andrew Cooper, george.dunlap, xen-devel,
	Roger Pau Monne

>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>> ago, if you have the BUG_ON() there, you can get rid of the extra
>> references.
> 
> Sure, but again, the overhead of having them in-place is negligible so
> might as well just keep it.

The overhead is only one aspect here. People looking at the code
may also be mislead into trying to figure out why the heck this
extra reference gets obtained. Plus sub-optimal code tends to get
cloned ...

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:14           ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-29 16:14 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Wei Liu, George Dunlap, Andrew Cooper, george.dunlap, xen-devel,
	Roger Pau Monne

>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>> ago, if you have the BUG_ON() there, you can get rid of the extra
>> references.
> 
> Sure, but again, the overhead of having them in-place is negligible so
> might as well just keep it.

The overhead is only one aspect here. People looking at the code
may also be misled into trying to figure out why the heck this
extra reference gets obtained. Plus sub-optimal code tends to get
cloned ...

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:22             ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:22 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 5:14 PM, Jan Beulich wrote:
>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>>> ago, if you have the BUG_ON() there, you can get rid of the extra
>>> references.
>>
>> Sure, but again, the overhead of having them in-place is negligible so
>> might as well just keep it.
> 
> The overhead is only one aspect here. People looking at the code
> may also be mislead into trying to figure out why the heck this
> extra reference gets obtained. Plus sub-optimal code tends to get
> cloned ...

I think it's better not to have duplicate measures as well, but I don't
want to argue too much over it.

At very least, there should be a comment saying that the get_page()
shouldn't be necessary.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:22             ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:22 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 5:14 PM, Jan Beulich wrote:
>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>>> ago, if you have the BUG_ON() there, you can get rid of the extra
>>> references.
>>
>> Sure, but again, the overhead of having them in-place is negligible so
>> might as well just keep it.
> 
> The overhead is only one aspect here. People looking at the code
> may also be mislead into trying to figure out why the heck this
> extra reference gets obtained. Plus sub-optimal code tends to get
> cloned ...

I think it's better not to have duplicate measures as well, but I don't
want to argue too much over it.

At very least, there should be a comment saying that the get_page()
shouldn't be necessary.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:26             ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:26 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Wei Liu, George Dunlap, Andrew Cooper, George Dunlap, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> > On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
> >> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> >> ago, if you have the BUG_ON() there, you can get rid of the extra
> >> references.
> >
> > Sure, but again, the overhead of having them in-place is negligible so
> > might as well just keep it.
>
> The overhead is only one aspect here. People looking at the code
> may also be mislead into trying to figure out why the heck this
> extra reference gets obtained. Plus sub-optimal code tends to get
> cloned ...

Yea, I'm with you.. Alright, in that case Andrew pulled in that
previous patch into x86-next for no good reason as that whole thing is
going to get dropped now. Andrew - if you can just drop that patch
from x86-next I'll rebase on staging and resend without that patch.

Thanks,
Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:26             ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:26 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Wei Liu, George Dunlap, Andrew Cooper, George Dunlap, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> > On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
> >> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> >> ago, if you have the BUG_ON() there, you can get rid of the extra
> >> references.
> >
> > Sure, but again, the overhead of having them in-place is negligible so
> > might as well just keep it.
>
> The overhead is only one aspect here. People looking at the code
> may also be mislead into trying to figure out why the heck this
> extra reference gets obtained. Plus sub-optimal code tends to get
> cloned ...

Yea, I'm with you.. Alright, in that case Andrew pulled in that
previous patch into x86-next for no good reason as that whole thing is
going to get dropped now. Andrew - if you can just drop that patch
from x86-next I'll rebase on staging and resend without that patch.

Thanks,
Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:29               ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:29 UTC (permalink / raw)
  To: Tamas K Lengyel, Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 5:26 PM, Tamas K Lengyel wrote:
> On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
>>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>>>> ago, if you have the BUG_ON() there, you can get rid of the extra
>>>> references.
>>>
>>> Sure, but again, the overhead of having them in-place is negligible so
>>> might as well just keep it.
>>
>> The overhead is only one aspect here. People looking at the code
>> may also be mislead into trying to figure out why the heck this
>> extra reference gets obtained. Plus sub-optimal code tends to get
>> cloned ...
> 
> Yea, I'm with you.. Alright, in that case Andrew pulled in that
> previous patch into x86-next for no good reason as that whole thing is
> going to get dropped now. Andrew - if you can just drop that patch
> from x86-next I'll rebase on staging and resend without that patch.

I assume he wants that branch to be fast-forwarding; if so, he can't
really pull it out.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:29               ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:29 UTC (permalink / raw)
  To: Tamas K Lengyel, Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 5:26 PM, Tamas K Lengyel wrote:
> On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
>>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
>>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
>>>> ago, if you have the BUG_ON() there, you can get rid of the extra
>>>> references.
>>>
>>> Sure, but again, the overhead of having them in-place is negligible so
>>> might as well just keep it.
>>
>> The overhead is only one aspect here. People looking at the code
>> may also be mislead into trying to figure out why the heck this
>> extra reference gets obtained. Plus sub-optimal code tends to get
>> cloned ...
> 
> Yea, I'm with you.. Alright, in that case Andrew pulled in that
> previous patch into x86-next for no good reason as that whole thing is
> going to get dropped now. Andrew - if you can just drop that patch
> from x86-next I'll rebase on staging and resend without that patch.

I assume he wants that branch to be fast-forwarding; if so, he can't
really pull it out.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 16:35       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:35 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
> > ordering" added extra sanity checking to page_lock/page_unlock for debug builds
> > with the assumption that no hypervisor path ever locks two pages at once.
> >
> > This assumption doesn't hold during memory sharing so we introduce separate
> > functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
> > in the memory sharing subsystem.
>
> Completely bypassing these checks looks undesirable to me, to
> be honest. Otoh as discussed mem-sharing is abusing the lock
> anyway. So if this is to remain that way, I wonder whether the
> better course of action wouldn't be to stop abusing the
> functions, cloning them to continue using the PGT_locked flag.
> I would then wonder whether e.g. the checking of PGT_validated
> would actually be needed in the cloned functions - that's again
> a flag which ought to have meaning mainly (exclusively?) for
> handling of PV guests.

I don't have an answer to your question. I would prefer getting rid of
this whole thing myself. For now I'm just trying to make it usable
again.

>
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
> >  #define current_locked_page_ne_check(x) true
> >  #endif
> >
> > -int page_lock(struct page_info *page)
> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> > +static int _page_lock(struct page_info *page)
>
> As per above, personally I'm against introducing
> page_{,un}lock_memshr(), as that makes the abuse even more
> look like proper use. But if this was to be kept this way, may I
> ask that you switch int -> bool in the return types at this occasion?

Switching them to bool would be fine. Replacing them with something
saner is unfortunately out-of-scope at the moment. Unless someone has
a specific solution that can be put in place. I don't have one.

Thanks,
Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 16:35       ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:35 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> > Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
> > ordering" added extra sanity checking to page_lock/page_unlock for debug builds
> > with the assumption that no hypervisor path ever locks two pages at once.
> >
> > This assumption doesn't hold during memory sharing so we introduce separate
> > functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
> > in the memory sharing subsystem.
>
> Completely bypassing these checks looks undesirable to me, to
> be honest. Otoh as discussed mem-sharing is abusing the lock
> anyway. So if this is to remain that way, I wonder whether the
> better course of action wouldn't be to stop abusing the
> functions, cloning them to continue using the PGT_locked flag.
> I would then wonder whether e.g. the checking of PGT_validated
> would actually be needed in the cloned functions - that's again
> a flag which ought to have meaning mainly (exclusively?) for
> handling of PV guests.

I don't have an answer to your question. I would prefer getting rid of
this whole thing myself. For now I'm just trying to make it usable
again.

>
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
> >  #define current_locked_page_ne_check(x) true
> >  #endif
> >
> > -int page_lock(struct page_info *page)
> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> > +static int _page_lock(struct page_info *page)
>
> As per above, personally I'm against introducing
> page_{,un}lock_memshr(), as that makes the abuse even more
> look like proper use. But if this was to be kept this way, may I
> ask that you switch int -> bool in the return types at this occasion?

Switching them to bool would be fine. Replacing them with something
saner is unfortunately out-of-scope at the moment. Unless someone has
a specific solution that can be put in place. I don't have one.

Thanks,
Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:36                 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:36 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 10:29 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 5:26 PM, Tamas K Lengyel wrote:
> > On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> >>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
> >>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> >>>> ago, if you have the BUG_ON() there, you can get rid of the extra
> >>>> references.
> >>>
> >>> Sure, but again, the overhead of having them in-place is negligible so
> >>> might as well just keep it.
> >>
> >> The overhead is only one aspect here. People looking at the code
> >> may also be mislead into trying to figure out why the heck this
> >> extra reference gets obtained. Plus sub-optimal code tends to get
> >> cloned ...
> >
> > Yea, I'm with you.. Alright, in that case Andrew pulled in that
> > previous patch into x86-next for no good reason as that whole thing is
> > going to get dropped now. Andrew - if you can just drop that patch
> > from x86-next I'll rebase on staging and resend without that patch.
>
> I assume he wants that branch to be fast-forwarding; if so, he can't
> really pull it out.
>

Yea, figured.. Oh well, not really a big deal.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released
@ 2019-04-29 16:36                 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-29 16:36 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Mon, Apr 29, 2019 at 10:29 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/29/19 5:26 PM, Tamas K Lengyel wrote:
> > On Mon, Apr 29, 2019 at 10:14 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >>>>> On 29.04.19 at 18:05, <tamas@tklengyel.com> wrote:
> >>> On Mon, Apr 29, 2019 at 9:52 AM George Dunlap <george.dunlap@citrix.com> wrote:
> >>>> I haven't re-grokked the code here, but assuming I was correct 2 weeks
> >>>> ago, if you have the BUG_ON() there, you can get rid of the extra
> >>>> references.
> >>>
> >>> Sure, but again, the overhead of having them in-place is negligible so
> >>> might as well just keep it.
> >>
> >> The overhead is only one aspect here. People looking at the code
> >> may also be mislead into trying to figure out why the heck this
> >> extra reference gets obtained. Plus sub-optimal code tends to get
> >> cloned ...
> >
> > Yea, I'm with you.. Alright, in that case Andrew pulled in that
> > previous patch into x86-next for no good reason as that whole thing is
> > going to get dropped now. Andrew - if you can just drop that patch
> > from x86-next I'll rebase on staging and resend without that patch.
>
> I assume he wants that branch to be fast-forwarding; if so, he can't
> really pull it out.
>

Yea, figured.. Oh well, not really a big deal.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 16:42       ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:42 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 4:18 PM, Jan Beulich wrote:
>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
>> ordering" added extra sanity checking to page_lock/page_unlock for debug builds
>> with the assumption that no hypervisor path ever locks two pages at once.
>>
>> This assumption doesn't hold during memory sharing so we introduce separate
>> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
>> in the memory sharing subsystem.
> 
> Completely bypassing these checks looks undesirable to me, to
> be honest. Otoh as discussed mem-sharing is abusing the lock
> anyway.

I'm not sure what you mean by "abusing"; would it be any different if
page_struct() had a spinlock element called "page_lock", that was only
used by PV guests?

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-29 16:42       ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-29 16:42 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/29/19 4:18 PM, Jan Beulich wrote:
>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
>> ordering" added extra sanity checking to page_lock/page_unlock for debug builds
>> with the assumption that no hypervisor path ever locks two pages at once.
>>
>> This assumption doesn't hold during memory sharing so we introduce separate
>> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
>> in the memory sharing subsystem.
> 
> Completely bypassing these checks looks undesirable to me, to
> be honest. Otoh as discussed mem-sharing is abusing the lock
> anyway.

I'm not sure what you mean by "abusing"; would it be any different if
page_struct() had a spinlock element called "page_lock", that was only
used by PV guests?

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  7:13         ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  7:13 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 29.04.19 at 18:42, <george.dunlap@citrix.com> wrote:
> On 4/29/19 4:18 PM, Jan Beulich wrote:
>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and 
> put_page_type
>>> ordering" added extra sanity checking to page_lock/page_unlock for debug 
> builds
>>> with the assumption that no hypervisor path ever locks two pages at once.
>>>
>>> This assumption doesn't hold during memory sharing so we introduce separate
>>> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
>>> in the memory sharing subsystem.
>> 
>> Completely bypassing these checks looks undesirable to me, to
>> be honest. Otoh as discussed mem-sharing is abusing the lock
>> anyway.
> 
> I'm not sure what you mean by "abusing"; would it be any different if
> page_struct() had a spinlock element called "page_lock", that was only
> used by PV guests?

No, it wouldn't. By "abusing" I mean it is using something for HVM
which is meant to be used for PV only. It is mem-sharing's use alone
which prevents page_{,un}lock() to be put inside #ifdef CONFIG_PV.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  7:13         ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  7:13 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 29.04.19 at 18:42, <george.dunlap@citrix.com> wrote:
> On 4/29/19 4:18 PM, Jan Beulich wrote:
>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>> Patch cf4b30dca0a "Add debug code to detect illegal page_lock and 
> put_page_type
>>> ordering" added extra sanity checking to page_lock/page_unlock for debug 
> builds
>>> with the assumption that no hypervisor path ever locks two pages at once.
>>>
>>> This assumption doesn't hold during memory sharing so we introduce separate
>>> functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
>>> in the memory sharing subsystem.
>> 
>> Completely bypassing these checks looks undesirable to me, to
>> be honest. Otoh as discussed mem-sharing is abusing the lock
>> anyway.
> 
> I'm not sure what you mean by "abusing"; would it be any different if
> page_struct() had a spinlock element called "page_lock", that was only
> used by PV guests?

No, it wouldn't. By "abusing" I mean it is using something for HVM
which is meant to be used for PV only. It is mem-sharing's use alone
which prevents page_{,un}lock() to be put inside #ifdef CONFIG_PV.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  7:15         ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  7:15 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> > --- a/xen/arch/x86/mm.c
>> > +++ b/xen/arch/x86/mm.c
>> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
>> >  #define current_locked_page_ne_check(x) true
>> >  #endif
>> >
>> > -int page_lock(struct page_info *page)
>> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>> > +static int _page_lock(struct page_info *page)
>>
>> As per above, personally I'm against introducing
>> page_{,un}lock_memshr(), as that makes the abuse even more
>> look like proper use. But if this was to be kept this way, may I
>> ask that you switch int -> bool in the return types at this occasion?
> 
> Switching them to bool would be fine. Replacing them with something
> saner is unfortunately out-of-scope at the moment. Unless someone has
> a specific solution that can be put in place. I don't have one.

I've outlined a solution already: Make a mem-sharing private variant
of page_{,un}lock(), derived from the PV ones (but with pieces
dropped you don't want/need).

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  7:15         ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  7:15 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> > --- a/xen/arch/x86/mm.c
>> > +++ b/xen/arch/x86/mm.c
>> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
>> >  #define current_locked_page_ne_check(x) true
>> >  #endif
>> >
>> > -int page_lock(struct page_info *page)
>> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>> > +static int _page_lock(struct page_info *page)
>>
>> As per above, personally I'm against introducing
>> page_{,un}lock_memshr(), as that makes the abuse even more
>> look like proper use. But if this was to be kept this way, may I
>> ask that you switch int -> bool in the return types at this occasion?
> 
> Switching them to bool would be fine. Replacing them with something
> saner is unfortunately out-of-scope at the moment. Unless someone has
> a specific solution that can be put in place. I don't have one.

I've outlined a solution already: Make a mem-sharing private variant
of page_{,un}lock(), derived from the PV ones (but with pieces
dropped you don't want/need).

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  8:28           ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30  8:28 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >> > --- a/xen/arch/x86/mm.c
> >> > +++ b/xen/arch/x86/mm.c
> >> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
> >> >  #define current_locked_page_ne_check(x) true
> >> >  #endif
> >> >
> >> > -int page_lock(struct page_info *page)
> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >> > +static int _page_lock(struct page_info *page)
> >>
> >> As per above, personally I'm against introducing
> >> page_{,un}lock_memshr(), as that makes the abuse even more
> >> look like proper use. But if this was to be kept this way, may I
> >> ask that you switch int -> bool in the return types at this occasion?
> >
> > Switching them to bool would be fine. Replacing them with something
> > saner is unfortunately out-of-scope at the moment. Unless someone has
> > a specific solution that can be put in place. I don't have one.
>
> I've outlined a solution already: Make a mem-sharing private variant
> of page_{,un}lock(), derived from the PV ones (but with pieces
> dropped you don't want/need).

Well, that's what I already did here in this patch. No?

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  8:28           ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30  8:28 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >> > --- a/xen/arch/x86/mm.c
> >> > +++ b/xen/arch/x86/mm.c
> >> > @@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
> >> >  #define current_locked_page_ne_check(x) true
> >> >  #endif
> >> >
> >> > -int page_lock(struct page_info *page)
> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >> > +static int _page_lock(struct page_info *page)
> >>
> >> As per above, personally I'm against introducing
> >> page_{,un}lock_memshr(), as that makes the abuse even more
> >> look like proper use. But if this was to be kept this way, may I
> >> ask that you switch int -> bool in the return types at this occasion?
> >
> > Switching them to bool would be fine. Replacing them with something
> > saner is unfortunately out-of-scope at the moment. Unless someone has
> > a specific solution that can be put in place. I don't have one.
>
> I've outlined a solution already: Make a mem-sharing private variant
> of page_{,un}lock(), derived from the PV ones (but with pieces
> dropped you don't want/need).

Well, that's what I already did here in this patch. No?

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  8:44             ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  8:44 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
>> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> >> > --- a/xen/arch/x86/mm.c
>> >> > +++ b/xen/arch/x86/mm.c
>> >> > @@ -2030,12 +2030,11 @@ static inline bool 
> current_locked_page_ne_check(struct page_info *page) {
>> >> >  #define current_locked_page_ne_check(x) true
>> >> >  #endif
>> >> >
>> >> > -int page_lock(struct page_info *page)
>> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>> >> > +static int _page_lock(struct page_info *page)
>> >>
>> >> As per above, personally I'm against introducing
>> >> page_{,un}lock_memshr(), as that makes the abuse even more
>> >> look like proper use. But if this was to be kept this way, may I
>> >> ask that you switch int -> bool in the return types at this occasion?
>> >
>> > Switching them to bool would be fine. Replacing them with something
>> > saner is unfortunately out-of-scope at the moment. Unless someone has
>> > a specific solution that can be put in place. I don't have one.
>>
>> I've outlined a solution already: Make a mem-sharing private variant
>> of page_{,un}lock(), derived from the PV ones (but with pieces
>> dropped you don't want/need).
> 
> Well, that's what I already did here in this patch. No?

No - you've retained a shared _page_{,un}lock(), whereas my
suggestion was to have a completely independent pair of
functions in mem_sharing.c. The only thing needed by both PV
and HVM would then be the PGT_locked flag.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30  8:44             ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30  8:44 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
>> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>> >> > --- a/xen/arch/x86/mm.c
>> >> > +++ b/xen/arch/x86/mm.c
>> >> > @@ -2030,12 +2030,11 @@ static inline bool 
> current_locked_page_ne_check(struct page_info *page) {
>> >> >  #define current_locked_page_ne_check(x) true
>> >> >  #endif
>> >> >
>> >> > -int page_lock(struct page_info *page)
>> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>> >> > +static int _page_lock(struct page_info *page)
>> >>
>> >> As per above, personally I'm against introducing
>> >> page_{,un}lock_memshr(), as that makes the abuse even more
>> >> look like proper use. But if this was to be kept this way, may I
>> >> ask that you switch int -> bool in the return types at this occasion?
>> >
>> > Switching them to bool would be fine. Replacing them with something
>> > saner is unfortunately out-of-scope at the moment. Unless someone has
>> > a specific solution that can be put in place. I don't have one.
>>
>> I've outlined a solution already: Make a mem-sharing private variant
>> of page_{,un}lock(), derived from the PV ones (but with pieces
>> dropped you don't want/need).
> 
> Well, that's what I already did here in this patch. No?

No - you've retained a shared _page_{,un}lock(), whereas my
suggestion was to have a completely independent pair of
functions in mem_sharing.c. The only thing needed by both PV
and HVM would then be the PGT_locked flag.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:19               ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30 14:19 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Tue, Apr 30, 2019 at 2:44 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> > On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> >> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >> >> > --- a/xen/arch/x86/mm.c
> >> >> > +++ b/xen/arch/x86/mm.c
> >> >> > @@ -2030,12 +2030,11 @@ static inline bool
> > current_locked_page_ne_check(struct page_info *page) {
> >> >> >  #define current_locked_page_ne_check(x) true
> >> >> >  #endif
> >> >> >
> >> >> > -int page_lock(struct page_info *page)
> >> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >> >> > +static int _page_lock(struct page_info *page)
> >> >>
> >> >> As per above, personally I'm against introducing
> >> >> page_{,un}lock_memshr(), as that makes the abuse even more
> >> >> look like proper use. But if this was to be kept this way, may I
> >> >> ask that you switch int -> bool in the return types at this occasion?
> >> >
> >> > Switching them to bool would be fine. Replacing them with something
> >> > saner is unfortunately out-of-scope at the moment. Unless someone has
> >> > a specific solution that can be put in place. I don't have one.
> >>
> >> I've outlined a solution already: Make a mem-sharing private variant
> >> of page_{,un}lock(), derived from the PV ones (but with pieces
> >> dropped you don't want/need).
> >
> > Well, that's what I already did here in this patch. No?
>
> No - you've retained a shared _page_{,un}lock(), whereas my
> suggestion was to have a completely independent pair of
> functions in mem_sharing.c. The only thing needed by both PV
> and HVM would then be the PGT_locked flag.

I see. Sure.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:19               ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30 14:19 UTC (permalink / raw)
  To: Jan Beulich
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On Tue, Apr 30, 2019 at 2:44 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> > On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>
> >> >>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> >> > On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >> >> >>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >> >> > --- a/xen/arch/x86/mm.c
> >> >> > +++ b/xen/arch/x86/mm.c
> >> >> > @@ -2030,12 +2030,11 @@ static inline bool
> > current_locked_page_ne_check(struct page_info *page) {
> >> >> >  #define current_locked_page_ne_check(x) true
> >> >> >  #endif
> >> >> >
> >> >> > -int page_lock(struct page_info *page)
> >> >> > +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >> >> > +static int _page_lock(struct page_info *page)
> >> >>
> >> >> As per above, personally I'm against introducing
> >> >> page_{,un}lock_memshr(), as that makes the abuse even more
> >> >> look like proper use. But if this was to be kept this way, may I
> >> >> ask that you switch int -> bool in the return types at this occasion?
> >> >
> >> > Switching them to bool would be fine. Replacing them with something
> >> > saner is unfortunately out-of-scope at the moment. Unless someone has
> >> > a specific solution that can be put in place. I don't have one.
> >>
> >> I've outlined a solution already: Make a mem-sharing private variant
> >> of page_{,un}lock(), derived from the PV ones (but with pieces
> >> dropped you don't want/need).
> >
> > Well, that's what I already did here in this patch. No?
>
> No - you've retained a shared _page_{,un}lock(), whereas my
> suggestion was to have a completely independent pair of
> functions in mem_sharing.c. The only thing needed by both PV
> and HVM would then be the PGT_locked flag.

I see. Sure.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:43               ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-30 14:43 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>
>>>>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
>>>> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>>>>> --- a/xen/arch/x86/mm.c
>>>>>> +++ b/xen/arch/x86/mm.c
>>>>>> @@ -2030,12 +2030,11 @@ static inline bool 
>> current_locked_page_ne_check(struct page_info *page) {
>>>>>>  #define current_locked_page_ne_check(x) true
>>>>>>  #endif
>>>>>>
>>>>>> -int page_lock(struct page_info *page)
>>>>>> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>>>>>> +static int _page_lock(struct page_info *page)
>>>>>
>>>>> As per above, personally I'm against introducing
>>>>> page_{,un}lock_memshr(), as that makes the abuse even more
>>>>> look like proper use. But if this was to be kept this way, may I
>>>>> ask that you switch int -> bool in the return types at this occasion?
>>>>
>>>> Switching them to bool would be fine. Replacing them with something
>>>> saner is unfortunately out-of-scope at the moment. Unless someone has
>>>> a specific solution that can be put in place. I don't have one.
>>>
>>> I've outlined a solution already: Make a mem-sharing private variant
>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>> dropped you don't want/need).
>>
>> Well, that's what I already did here in this patch. No?
> 
> No - you've retained a shared _page_{,un}lock(), whereas my
> suggestion was to have a completely independent pair of
> functions in mem_sharing.c. The only thing needed by both PV
> and HVM would then be the PGT_locked flag.

But it wasn't obvious to me how the implementations of the actual lock
function would be different.  And there's no point in having two
identical implementations; in fact, it would be harmful.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:43               ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-30 14:43 UTC (permalink / raw)
  To: Jan Beulich, Tamas K Lengyel
  Cc: George Dunlap, Andrew Cooper, Wei Liu, xen-devel, Roger Pau Monne

On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>
>>>>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
>>>> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
>>>>>> --- a/xen/arch/x86/mm.c
>>>>>> +++ b/xen/arch/x86/mm.c
>>>>>> @@ -2030,12 +2030,11 @@ static inline bool 
>> current_locked_page_ne_check(struct page_info *page) {
>>>>>>  #define current_locked_page_ne_check(x) true
>>>>>>  #endif
>>>>>>
>>>>>> -int page_lock(struct page_info *page)
>>>>>> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
>>>>>> +static int _page_lock(struct page_info *page)
>>>>>
>>>>> As per above, personally I'm against introducing
>>>>> page_{,un}lock_memshr(), as that makes the abuse even more
>>>>> look like proper use. But if this was to be kept this way, may I
>>>>> ask that you switch int -> bool in the return types at this occasion?
>>>>
>>>> Switching them to bool would be fine. Replacing them with something
>>>> saner is unfortunately out-of-scope at the moment. Unless someone has
>>>> a specific solution that can be put in place. I don't have one.
>>>
>>> I've outlined a solution already: Make a mem-sharing private variant
>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>> dropped you don't want/need).
>>
>> Well, that's what I already did here in this patch. No?
> 
> No - you've retained a shared _page_{,un}lock(), whereas my
> suggestion was to have a completely independent pair of
> functions in mem_sharing.c. The only thing needed by both PV
> and HVM would then be the PGT_locked flag.

But it wasn't obvious to me how the implementations of the actual lock
function would be different.  And there's no point in having two
identical implementations; in fact, it would be harmful.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:46                 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30 14:46 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Tue, Apr 30, 2019 at 8:43 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/30/19 9:44 AM, Jan Beulich wrote:
> >>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> >> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>>
> >>>>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> >>>> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>>>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >>>>>> --- a/xen/arch/x86/mm.c
> >>>>>> +++ b/xen/arch/x86/mm.c
> >>>>>> @@ -2030,12 +2030,11 @@ static inline bool
> >> current_locked_page_ne_check(struct page_info *page) {
> >>>>>>  #define current_locked_page_ne_check(x) true
> >>>>>>  #endif
> >>>>>>
> >>>>>> -int page_lock(struct page_info *page)
> >>>>>> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >>>>>> +static int _page_lock(struct page_info *page)
> >>>>>
> >>>>> As per above, personally I'm against introducing
> >>>>> page_{,un}lock_memshr(), as that makes the abuse even more
> >>>>> look like proper use. But if this was to be kept this way, may I
> >>>>> ask that you switch int -> bool in the return types at this occasion?
> >>>>
> >>>> Switching them to bool would be fine. Replacing them with something
> >>>> saner is unfortunately out-of-scope at the moment. Unless someone has
> >>>> a specific solution that can be put in place. I don't have one.
> >>>
> >>> I've outlined a solution already: Make a mem-sharing private variant
> >>> of page_{,un}lock(), derived from the PV ones (but with pieces
> >>> dropped you don't want/need).
> >>
> >> Well, that's what I already did here in this patch. No?
> >
> > No - you've retained a shared _page_{,un}lock(), whereas my
> > suggestion was to have a completely independent pair of
> > functions in mem_sharing.c. The only thing needed by both PV
> > and HVM would then be the PGT_locked flag.
>
> But it wasn't obvious to me how the implementations of the actual lock
> function would be be different.  And there's no point in having two
> identical implementations; in fact, it would be harmful.

I also think it's wasteful and an invitation for future breakage. But
right now I just want the functions working without them intentionally
crashing the hypervisor under me - which is the case right now.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 14:46                 ` Tamas K Lengyel
  0 siblings, 0 replies; 74+ messages in thread
From: Tamas K Lengyel @ 2019-04-30 14:46 UTC (permalink / raw)
  To: George Dunlap
  Cc: Wei Liu, George Dunlap, Andrew Cooper, Jan Beulich, xen-devel,
	Roger Pau Monne

On Tue, Apr 30, 2019 at 8:43 AM George Dunlap <george.dunlap@citrix.com> wrote:
>
> On 4/30/19 9:44 AM, Jan Beulich wrote:
> >>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
> >> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>>
> >>>>>> On 29.04.19 at 18:35, <tamas@tklengyel.com> wrote:
> >>>> On Mon, Apr 29, 2019 at 9:18 AM Jan Beulich <JBeulich@suse.com> wrote:
> >>>>>>>> On 26.04.19 at 19:21, <tamas@tklengyel.com> wrote:
> >>>>>> --- a/xen/arch/x86/mm.c
> >>>>>> +++ b/xen/arch/x86/mm.c
> >>>>>> @@ -2030,12 +2030,11 @@ static inline bool
> >> current_locked_page_ne_check(struct page_info *page) {
> >>>>>>  #define current_locked_page_ne_check(x) true
> >>>>>>  #endif
> >>>>>>
> >>>>>> -int page_lock(struct page_info *page)
> >>>>>> +#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
> >>>>>> +static int _page_lock(struct page_info *page)
> >>>>>
> >>>>> As per above, personally I'm against introducing
> >>>>> page_{,un}lock_memshr(), as that makes the abuse look even more
> >>>>> like proper use. But if this was to be kept this way, may I
> >>>>> ask that you switch int -> bool in the return types at this occasion?
> >>>>
> >>>> Switching them to bool would be fine. Replacing them with something
> >>>> saner is unfortunately out-of-scope at the moment. Unless someone has
> >>>> a specific solution that can be put in place. I don't have one.
> >>>
> >>> I've outlined a solution already: Make a mem-sharing private variant
> >>> of page_{,un}lock(), derived from the PV ones (but with pieces
> >>> dropped you don't want/need).
> >>
> >> Well, that's what I already did here in this patch. No?
> >
> > No - you've retained a shared _page_{,un}lock(), whereas my
> > suggestion was to have a completely independent pair of
> > functions in mem_sharing.c. The only thing needed by both PV
> > and HVM would then be the PGT_locked flag.
>
> But it wasn't obvious to me how the implementations of the actual lock
> function would be different.  And there's no point in having two
> identical implementations; in fact, it would be harmful.

I also think it's wasteful and an invitation for future breakage. But
right now I just want the functions working without them intentionally
crashing the hypervisor under me - which is the case right now.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 15:06                 ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30 15:06 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>> dropped you don't want/need).
>>>
>>> Well, that's what I already did here in this patch. No?
>> 
>> No - you've retained a shared _page_{,un}lock(), whereas my
>> suggestion was to have a completely independent pair of
>> functions in mem_sharing.c. The only thing needed by both PV
>> and HVM would then be the PGT_locked flag.
> 
> But it wasn't obvious to me how the implementations of the actual lock
> function would be different.  And there's no point in having two
> identical implementations; in fact, it would be harmful.

The main difference would be the one that Tamas is after - not
doing the checking that we do for PV. Whether other bits could
be dropped for a mem-sharing special variant I don't know (yet).

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 15:06                 ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-04-30 15:06 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>> dropped you don't want/need).
>>>
>>> Well, that's what I already did here in this patch. No?
>> 
>> No - you've retained a shared _page_{,un}lock(), whereas my
>> suggestion was to have a completely independent pair of
>> functions in mem_sharing.c. The only thing needed by both PV
>> and HVM would then be the PGT_locked flag.
> 
> But it wasn't obvious to me how the implementations of the actual lock
> function would be different.  And there's no point in having two
> identical implementations; in fact, it would be harmful.

The main difference would be the one that Tamas is after - not
doing the checking that we do for PV. Whether other bits could
be dropped for a mem-sharing special variant I don't know (yet).

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 16:03                   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-30 16:03 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

On 4/30/19 4:06 PM, Jan Beulich wrote:
>>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
>> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>>> dropped you don't want/need).
>>>>
>>>> Well, that's what I already did here in this patch. No?
>>>
>>> No - you've retained a shared _page_{,un}lock(), whereas my
>>> suggestion was to have a completely independent pair of
>>> functions in mem_sharing.c. The only thing needed by both PV
>>> and HVM would then be the PGT_locked flag.
>>
>> But it wasn't obvious to me how the implementations of the actual lock
>> function would be different.  And there's no point in having two
>> identical implementations; in fact, it would be harmful.
> 
> The main difference would be the one that Tamas is after - not
> doing the checking that we do for PV. Whether other bits could
> be dropped for a mem-sharing special variant I don't know (yet).

The "checking" being that the type count doesn't go to 0?

It's not just page_lock() that does that checking; it's also
_put_page_type().  We can't really change one but leave the other alone.

The approach I'm exploring now is to have _put_page_type() only spin on
the last page reference if the type <= PGT_l4_page_table (i.e., if
_put_final_page_type() would be called for this type).

Thoughts?

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-04-30 16:03                   ` George Dunlap
  0 siblings, 0 replies; 74+ messages in thread
From: George Dunlap @ 2019-04-30 16:03 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

On 4/30/19 4:06 PM, Jan Beulich wrote:
>>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
>> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>>> dropped you don't want/need).
>>>>
>>>> Well, that's what I already did here in this patch. No?
>>>
>>> No - you've retained a shared _page_{,un}lock(), whereas my
>>> suggestion was to have a completely independent pair of
>>> functions in mem_sharing.c. The only thing needed by both PV
>>> and HVM would then be the PGT_locked flag.
>>
>> But it wasn't obvious to me how the implementations of the actual lock
>> function would be different.  And there's no point in having two
>> identical implementations; in fact, it would be harmful.
> 
> The main difference would be the one that Tamas is after - not
> doing the checking that we do for PV. Whether other bits could
> be dropped for a mem-sharing special variant I don't know (yet).

The "checking" being that the type count doesn't go to 0?

It's not just page_lock() that does that checking; it's also
_put_page_type().  We can't really change one but leave the other alone.

The approach I'm exploring now is to have _put_page_type() only spin on
the last page reference if the type <= PGT_l4_page_table (i.e., if
_put_final_page_type() would be called for this type).

Thoughts?

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-05-02  7:24                     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-05-02  7:24 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 30.04.19 at 18:03, <george.dunlap@citrix.com> wrote:
> On 4/30/19 4:06 PM, Jan Beulich wrote:
>>>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
>>> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>>>> dropped you don't want/need).
>>>>>
>>>>> Well, that's what I already did here in this patch. No?
>>>>
>>>> No - you've retained a shared _page_{,un}lock(), whereas my
>>>> suggestion was to have a completely independent pair of
>>>> functions in mem_sharing.c. The only thing needed by both PV
>>>> and HVM would then be the PGT_locked flag.
>>>
>>> But it wasn't obvious to me how the implementations of the actual lock
>>> function would be different.  And there's no point in having two
>>> identical implementations; in fact, it would be harmful.
>> 
>> The main difference would be the one that Tamas is after - not
>> doing the checking that we do for PV. Whether other bits could
>> be dropped for a mem-sharing special variant I don't know (yet).
> 
> The "checking" being that the type count doesn't go to 0?
> 
> It's not just page_lock() that does that checking; it's also
> _put_page_type().  We can't really change one but leave the other alone.

No, I mean the extra debug checking (current_locked_page_*()).
See his patch as to what he keeps for mem-sharing, and what he
drops.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock
@ 2019-05-02  7:24                     ` Jan Beulich
  0 siblings, 0 replies; 74+ messages in thread
From: Jan Beulich @ 2019-05-02  7:24 UTC (permalink / raw)
  To: george.dunlap
  Cc: Tamas K Lengyel, Wei Liu, George Dunlap, Andrew Cooper,
	xen-devel, Roger Pau Monne

>>> On 30.04.19 at 18:03, <george.dunlap@citrix.com> wrote:
> On 4/30/19 4:06 PM, Jan Beulich wrote:
>>>>> On 30.04.19 at 16:43, <george.dunlap@citrix.com> wrote:
>>> On 4/30/19 9:44 AM, Jan Beulich wrote:
>>>>>>> On 30.04.19 at 10:28, <tamas@tklengyel.com> wrote:
>>>>> On Tue, Apr 30, 2019 at 1:15 AM Jan Beulich <JBeulich@suse.com> wrote:
>>>>>> I've outlined a solution already: Make a mem-sharing private variant
>>>>>> of page_{,un}lock(), derived from the PV ones (but with pieces
>>>>>> dropped you don't want/need).
>>>>>
>>>>> Well, that's what I already did here in this patch. No?
>>>>
>>>> No - you've retained a shared _page_{,un}lock(), whereas my
>>>> suggestion was to have a completely independent pair of
>>>> functions in mem_sharing.c. The only thing needed by both PV
>>>> and HVM would then be the PGT_locked flag.
>>>
>>> But it wasn't obvious to me how the implementations of the actual lock
>>> function would be different.  And there's no point in having two
>>> identical implementations; in fact, it would be harmful.
>> 
>> The main difference would be the one that Tamas is after - not
>> doing the checking that we do for PV. Whether other bits could
>> be dropped for a mem-sharing special variant I don't know (yet).
> 
> The "checking" being that the type count doesn't go to 0?
> 
> It's not just page_lock() that does that checking; it's also
> _put_page_type().  We can't really change one but leave the other alone.

No, I mean the extra debug checking (current_locked_page_*()).
See his patch as to what he keeps for mem-sharing, and what he
drops.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 74+ messages in thread

end of thread, other threads:[~2019-05-02  7:25 UTC | newest]

Thread overview: 74+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-26 17:21 [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released Tamas K Lengyel
2019-04-26 17:21 ` [Xen-devel] " Tamas K Lengyel
2019-04-26 17:21 ` [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock Tamas K Lengyel
2019-04-26 17:21   ` [Xen-devel] " Tamas K Lengyel
2019-04-29 15:18   ` Jan Beulich
2019-04-29 15:18     ` [Xen-devel] " Jan Beulich
2019-04-29 16:35     ` Tamas K Lengyel
2019-04-29 16:35       ` [Xen-devel] " Tamas K Lengyel
2019-04-30  7:15       ` Jan Beulich
2019-04-30  7:15         ` [Xen-devel] " Jan Beulich
2019-04-30  8:28         ` Tamas K Lengyel
2019-04-30  8:28           ` [Xen-devel] " Tamas K Lengyel
2019-04-30  8:44           ` Jan Beulich
2019-04-30  8:44             ` [Xen-devel] " Jan Beulich
2019-04-30 14:19             ` Tamas K Lengyel
2019-04-30 14:19               ` [Xen-devel] " Tamas K Lengyel
2019-04-30 14:43             ` George Dunlap
2019-04-30 14:43               ` [Xen-devel] " George Dunlap
2019-04-30 14:46               ` Tamas K Lengyel
2019-04-30 14:46                 ` [Xen-devel] " Tamas K Lengyel
2019-04-30 15:06               ` Jan Beulich
2019-04-30 15:06                 ` [Xen-devel] " Jan Beulich
2019-04-30 16:03                 ` George Dunlap
2019-04-30 16:03                   ` [Xen-devel] " George Dunlap
2019-05-02  7:24                   ` Jan Beulich
2019-05-02  7:24                     ` [Xen-devel] " Jan Beulich
2019-04-29 16:42     ` George Dunlap
2019-04-29 16:42       ` [Xen-devel] " George Dunlap
2019-04-30  7:13       ` Jan Beulich
2019-04-30  7:13         ` [Xen-devel] " Jan Beulich
2019-04-26 17:21 ` [PATCH v3 3/4] x86/mem_sharing: enable mem_share audit mode only in debug builds Tamas K Lengyel
2019-04-26 17:21   ` [Xen-devel] " Tamas K Lengyel
2019-04-29 14:57   ` Jan Beulich
2019-04-29 14:57     ` [Xen-devel] " Jan Beulich
2019-04-29 15:31     ` Tamas K Lengyel
2019-04-29 15:31       ` [Xen-devel] " Tamas K Lengyel
2019-04-26 17:21 ` [PATCH v3 4/4] x86/mem_sharing: compile mem_sharing subsystem only when kconfig is enabled Tamas K Lengyel
2019-04-26 17:21   ` [Xen-devel] " Tamas K Lengyel
2019-04-29 15:32   ` Jan Beulich
2019-04-29 15:32     ` [Xen-devel] " Jan Beulich
2019-04-29 15:49     ` Tamas K Lengyel
2019-04-29 15:49       ` [Xen-devel] " Tamas K Lengyel
2019-04-29 14:32 ` [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released George Dunlap
2019-04-29 14:32   ` [Xen-devel] " George Dunlap
2019-04-29 14:46   ` George Dunlap
2019-04-29 14:46     ` [Xen-devel] " George Dunlap
2019-04-29 14:49     ` Andrew Cooper
2019-04-29 14:49       ` [Xen-devel] " Andrew Cooper
2019-04-29 14:54       ` George Dunlap
2019-04-29 14:54         ` [Xen-devel] " George Dunlap
2019-04-29 15:37         ` Tamas K Lengyel
2019-04-29 15:37           ` [Xen-devel] " Tamas K Lengyel
2019-04-29 15:01 ` Jan Beulich
2019-04-29 15:01   ` [Xen-devel] " Jan Beulich
2019-04-29 15:41   ` Tamas K Lengyel
2019-04-29 15:41     ` [Xen-devel] " Tamas K Lengyel
2019-04-29 15:52     ` George Dunlap
2019-04-29 15:52       ` [Xen-devel] " George Dunlap
2019-04-29 16:05       ` Tamas K Lengyel
2019-04-29 16:05         ` [Xen-devel] " Tamas K Lengyel
2019-04-29 16:14         ` Jan Beulich
2019-04-29 16:14           ` [Xen-devel] " Jan Beulich
2019-04-29 16:22           ` George Dunlap
2019-04-29 16:22             ` [Xen-devel] " George Dunlap
2019-04-29 16:26           ` Tamas K Lengyel
2019-04-29 16:26             ` [Xen-devel] " Tamas K Lengyel
2019-04-29 16:29             ` George Dunlap
2019-04-29 16:29               ` [Xen-devel] " George Dunlap
2019-04-29 16:36               ` Tamas K Lengyel
2019-04-29 16:36                 ` [Xen-devel] " Tamas K Lengyel
2019-04-29 15:44 ` George Dunlap
2019-04-29 15:44   ` [Xen-devel] " George Dunlap
2019-04-29 15:58   ` Tamas K Lengyel
2019-04-29 15:58     ` [Xen-devel] " Tamas K Lengyel

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.