From: Julien Grall <julien.grall@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Keir Fraser <keir@xen.org>,
	ian.campbell@citrix.com, stefano.stabellini@eu.citrix.com,
	Tim Deegan <tim@xen.org>, Ian Jackson <ian.jackson@eu.citrix.com>,
	Julien Grall <julien.grall@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [RFC 2/3] xen/common: memory: Add support for direct mapped domain in XENMEM_exchange
Date: Thu, 17 Dec 2015 16:31:58 +0000
Message-ID: <1450369919-22989-3-git-send-email-julien.grall@citrix.com>
In-Reply-To: <1450369919-22989-1-git-send-email-julien.grall@citrix.com>

A direct mapped domain needs to get back the exact same underlying
physical page when a region is re-populated.

Therefore, when memory is exchanged for a direct mapped domain, we want
neither to free the memory of the previous region nor to allocate new
memory.

Note that, because of this, the hypercall XENMEM_exchange can only work
on memory regions that were populated with real RAM when the domain was
created.

Signed-off-by: Julien Grall <julien.grall@citrix.com>

---
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Tim Deegan <tim@xen.org>
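
For context, here is a minimal guest-side sketch of a XENMEM_exchange
invocation for a single page. It is illustrative only and not part of
this patch: it assumes the public interface from
xen/include/public/memory.h plus Linux-style wrappers
(HYPERVISOR_memory_op() and set_xen_guest_handle()); the helper name
and error handling are placeholders. With this patch, a direct mapped
domain gets back exactly the MFN matching the GFN it names in the
"out" reservation, provided that GFN lies in a range populated with
real RAM at domain creation.

    /* Illustrative sketch only; not part of this patch. */
    #include <xen/interface/memory.h>  /* struct xen_memory_exchange */
    #include <asm/xen/hypercall.h>     /* HYPERVISOR_memory_op() */

    static int exchange_one_page(xen_pfn_t *gfn_in, xen_pfn_t *gfn_out)
    {
        struct xen_memory_exchange exchange = {
            .in = {
                .nr_extents   = 1,
                .extent_order = 0,  /* a single page */
                .domid        = DOMID_SELF,
            },
            .out = {
                .nr_extents   = 1,
                .extent_order = 0,
                .domid        = DOMID_SELF,
            },
        };
        int rc;

        /* Guest handles point at the input and output GFN arrays. */
        set_xen_guest_handle(exchange.in.extent_start, gfn_in);
        set_xen_guest_handle(exchange.out.extent_start, gfn_out);

        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);

        /* Success requires rc == 0 *and* every extent exchanged. */
        if ( rc == 0 && exchange.nr_exchanged != 1 )
            rc = -1;

        return rc;
    }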
---
 xen/common/memory.c | 133 +++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 90 insertions(+), 43 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index ac707e9..94c9a78 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -517,10 +517,19 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 
                 page = mfn_to_page(mfn);
 
-                if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
+                if ( is_domain_direct_mapped(d) )
                 {
-                    put_gfn(d, gmfn + k);
+                    if ( !get_page(page, d) )
+                        rc = -EINVAL;
+                    else
+                        put_page(page);
+                }
+                else if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                     rc = -EINVAL;
+
+                if ( unlikely(rc) )
+                {
+                    put_gfn(d, gmfn + k);
                     goto fail;
                 }
 
@@ -530,17 +539,20 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         }
 
         /* Allocate a chunk's worth of anonymous output pages. */
-        for ( j = 0; j < (1UL << out_chunk_order); j++ )
+        if ( !is_domain_direct_mapped(d) )
         {
-            page = alloc_domheap_pages(d, exch.out.extent_order,
-                                       MEMF_no_owner | memflags);
-            if ( unlikely(page == NULL) )
+            for ( j = 0; j < (1UL << out_chunk_order); j++ )
             {
-                rc = -ENOMEM;
-                goto fail;
-            }
+                page = alloc_domheap_pages(d, exch.out.extent_order,
+                                           MEMF_no_owner | memflags);
+                if ( unlikely(page == NULL) )
+                {
+                    rc = -ENOMEM;
+                    goto fail;
+                }
 
-            page_list_add(page, &out_chunk_list);
+                page_list_add(page, &out_chunk_list);
+            }
         }
 
         /*
@@ -552,47 +564,26 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         {
             unsigned long gfn;
 
-            if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            if ( !is_domain_direct_mapped(d) &&
+                 !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 BUG();
             mfn = page_to_mfn(page);
             gfn = mfn_to_gmfn(d, mfn);
             /* Pages were unshared above */
             BUG_ON(SHARED_M2P(gfn));
             guest_physmap_remove_page(d, gfn, mfn, 0);
-            put_page(page);
+
+            /*
+             * For a direct mapped domain, we want to be able to get
+             * the same page back later, so don't deallocate it.
+             */
+            if ( !is_domain_direct_mapped(d) )
+                put_page(page);
         }
 
         /* Assign each output page to the domain. */
-        for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
+        for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            if ( assign_pages(d, page, exch.out.extent_order,
-                              MEMF_no_refcount) )
-            {
-                unsigned long dec_count;
-                bool_t drop_dom_ref;
-
-                /*
-                 * Pages in in_chunk_list is stolen without
-                 * decreasing the tot_pages. If the domain is dying when
-                 * assign pages, we need decrease the count. For those pages
-                 * that has been assigned, it should be covered by
-                 * domain_relinquish_resources().
-                 */
-                dec_count = (((1UL << exch.in.extent_order) *
-                              (1UL << in_chunk_order)) -
-                             (j * (1UL << exch.out.extent_order)));
-
-                spin_lock(&d->page_alloc_lock);
-                drop_dom_ref = (dec_count &&
-                                !domain_adjust_tot_pages(d, -dec_count));
-                spin_unlock(&d->page_alloc_lock);
-
-                if ( drop_dom_ref )
-                    put_domain(d);
-
-                free_domheap_pages(page, exch.out.extent_order);
-                goto dying;
-            }
 
             if ( __copy_from_guest_offset(&gpfn, exch.out.extent_start,
                                           (i << out_chunk_order) + j, 1) )
@@ -601,7 +592,61 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
                 continue;
             }
 
-            mfn = page_to_mfn(page);
+            if ( is_domain_direct_mapped(d) )
+            {
+                if ( unlikely(d->is_dying) )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "Cannot assign page to domain %d -- dying.\n",
+                             d->domain_id);
+                    goto dying;
+                }
+
+                if ( !check_range_domain_direct_mapped(d, gpfn,
+                                                       exch.out.extent_order) )
+                    goto dying;
+
+                mfn = gpfn;
+            }
+            else
+            {
+                page = page_list_remove_head(&out_chunk_list);
+
+                /* The out chunk list should always contain enough pages. */
+                BUG_ON(!page);
+
+                if ( assign_pages(d, page, exch.out.extent_order,
+                                  MEMF_no_refcount) )
+                {
+                    unsigned long dec_count;
+                    bool_t drop_dom_ref;
+
+                    /*
+                     * Pages in in_chunk_list were stolen without
+                     * decreasing tot_pages. If the domain is dying when we
+                     * assign pages, we need to decrease the count. Pages that
+                     * have already been assigned are covered by
+                     * domain_relinquish_resources().
+                     */
+                    dec_count = (((1UL << exch.in.extent_order) *
+                                  (1UL << in_chunk_order)) -
+                                 (j * (1UL << exch.out.extent_order)));
+
+                    spin_lock(&d->page_alloc_lock);
+                    drop_dom_ref = (dec_count &&
+                                    !domain_adjust_tot_pages(d, -dec_count));
+                    spin_unlock(&d->page_alloc_lock);
+
+                    if ( drop_dom_ref )
+                        put_domain(d);
+
+                    free_domheap_pages(page, exch.out.extent_order);
+                    goto dying;
+                }
+
+                mfn = page_to_mfn(page);
+            }
+
             guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
 
             if ( !paging_mode_translate(d) )
@@ -630,7 +675,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
  fail:
     /* Reassign any input pages we managed to steal. */
     while ( (page = page_list_remove_head(&in_chunk_list)) )
-        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
+        if ( !is_domain_direct_mapped(d) &&
+             assign_pages(d, page, 0, MEMF_no_refcount) )
         {
             BUG_ON(!d->is_dying);
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
@@ -640,6 +686,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
  dying:
     rcu_unlock_domain(d);
     /* Free any output pages we managed to allocate. */
+    BUG_ON(is_domain_direct_mapped(d) && !page_list_empty(&out_chunk_list));
     while ( (page = page_list_remove_head(&out_chunk_list)) )
         free_domheap_pages(page, exch.out.extent_order);
 
-- 
2.1.4


Thread overview: 7+ messages
2015-12-17 16:31 [RFC 0/3] xen/arm: Support XENMEM_exchange Julien Grall
2015-12-17 16:31 ` [RFC 1/3] xen/common: memory: Introduce check_range_domain_direct_mapped Julien Grall
2015-12-17 16:31 ` Julien Grall [this message]
2015-12-22 16:18   ` [RFC 2/3] xen/common: memory: Add support for direct mapped domain in XENMEM_exchange Jan Beulich
2015-12-17 16:31 ` [RFC 3/3] xen/common: memory: Move steal_page in common code Julien Grall
2015-12-22 16:20   ` Jan Beulich
2015-12-22 16:16 ` [RFC 0/3] xen/arm: Support XENMEM_exchange Jan Beulich
