From: Tamas K Lengyel <tamas@tklengyel.com>
To: xen-devel@lists.xenproject.org
Cc: Tamas K Lengyel <tamas@tklengyel.com>,
	Wei Liu <wei.liu2@citrix.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v5 1/4] x86/mem_sharing: reorder when pages are unlocked and released
Date: Thu, 16 May 2019 15:37:49 -0600
Message-ID: <20190516213752.1701-1-tamas@tklengyel.com>

Calling _put_page_type while also holding the page_lock
for that page can cause a deadlock.
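
A minimal sketch of the hazard, assuming (as the mem_sharing code
relies on) that the page lock is the PGT_locked bit in type_info and
that _put_page_type() spins until that bit clears; the function names
are the real ones, the surrounding flow is illustrative only:

    /* Deadlock-prone ordering (what the code did before): */
    mem_sharing_page_lock(page);    /* sets PGT_locked              */
    put_page_and_type(page);        /* _put_page_type() may spin    */
                                    /* waiting for PGT_locked ...   */
    mem_sharing_page_unlock(page);  /* ... which is never reached   */

    /* Safe ordering (what this patch switches to): */
    mem_sharing_page_lock(page);
    /* ... inspect type_info, adjust rmaps, etc. ... */
    mem_sharing_page_unlock(page);  /* drop PGT_locked first        */
    put_page_and_type(page);        /* then drop the type reference */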

The comment being dropped is out-of-date and no longer reflects how the
locking works.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
---
This series is based on Andrew Cooper's x86-next branch.

v5: move the BUG_ON(!put_count) sanity check earlier, before any
    references are released
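
For share_pages() the reordering means the per-rmap-entry
put_page_and_type() calls can no longer happen under the lock, so the
patch counts them in put_count and replays them after both pages are
unlocked; the retained type references are also why the previously
needed extra get_page()/put_page() pair goes away. A rough sketch of
the pattern (simplified, not the literal code):

    unsigned long put_count = 0;

    /* Under the mem_sharing page locks: count instead of releasing. */
    while ( (gfn = rmap_iterate(cpage, &ri)) != NULL )
        put_count++;
    BUG_ON(!put_count);             /* before anything is released   */

    mem_sharing_page_unlock(secondpg);
    mem_sharing_page_unlock(firstpg);

    /* Locks dropped: now it is safe to release the references. */
    while ( put_count-- )
        put_page_and_type(cpage);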
---
 xen/arch/x86/mm/mem_sharing.c | 41 ++++++++++-------------------------
 1 file changed, 11 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index f16a3f5324..13b2f009d4 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EBUSY;
     }
 
-    /* We can only change the type if count is one */
-    /* Because we are locking pages individually, we need to drop
-     * the lock here, while the page is typed. We cannot risk the 
-     * race of page_unlock and then put_page_type. */
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
@@ -660,12 +656,11 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EEXIST;
     }
 
+    mem_sharing_page_unlock(page);
+
     /* Drop the final typecount */
     put_page_and_type(page);
 
-    /* Now that we've dropped the type, we can unlock */
-    mem_sharing_page_unlock(page);
-
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
@@ -900,6 +895,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -964,15 +960,6 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
         goto err_out;
     }
 
-    /* Acquire an extra reference, for the freeing below to be safe. */
-    if ( !get_page(cpage, dom_cow) )
-    {
-        ret = -EOVERFLOW;
-        mem_sharing_page_unlock(secondpg);
-        mem_sharing_page_unlock(firstpg);
-        goto err_out;
-    }
-
     /* Merge the lists together */
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
@@ -984,13 +971,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
         put_domain(d);
     }
     ASSERT(list_empty(&cpage->sharing->gfns));
+    BUG_ON(!put_count);
 
     /* Clear the rest of the shared state */
     page_sharing_dispose(cpage);
@@ -1002,7 +990,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
-    put_page(cpage);
+
+    while ( put_count-- )
+        put_page_and_type(cpage);
 
     /* We managed to free a domain page. */
     atomic_dec(&nr_shared_mfns);
@@ -1167,20 +1157,11 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
         mem_sharing_page_unlock(page);
-        if ( last_gfn )
-        {
-            if ( !get_page(page, dom_cow) )
-            {
-                put_gfn(d, gfn);
-                domain_crash(d);
-                return -EOVERFLOW;
-            }
-            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
-                put_page(page);
+        if ( last_gfn &&
+            test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);
-        }
+        put_page_and_type(page);
         put_gfn(d, gfn);
 
         return 0;
-- 
2.20.1


