* [PATCH 0 of 4] xenpaging fixes for xen-unstable
@ 2011-10-03 14:43 Olaf Hering
  2011-10-03 14:43 ` [PATCH 1 of 4] xenpaging: use p2m->get_entry() in p2m_mem_paging functions Olaf Hering
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Olaf Hering @ 2011-10-03 14:43 UTC (permalink / raw)
  To: xen-devel


The following series changes the p2m_mem_paging* functions to modify the p2mt
under the p2m_lock, and there is a change to improve PoD handling.

Please review and apply.

Olaf


 xen/arch/x86/mm/p2m-pod.c |   15 ++++++++++
 xen/arch/x86/mm/p2m.c     |   69 ++++++++++++++++++++++++++++------------------
 2 files changed, 57 insertions(+), 27 deletions(-)


* [PATCH 1 of 4] xenpaging: use p2m->get_entry() in p2m_mem_paging functions
  2011-10-03 14:43 [PATCH 0 of 4] xenpaging fixes for xen-unstable Olaf Hering
@ 2011-10-03 14:43 ` Olaf Hering
  2011-10-03 14:43 ` [PATCH 2 of 4] xenpaging: fix locking " Olaf Hering
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Olaf Hering @ 2011-10-03 14:43 UTC (permalink / raw)
  To: xen-devel

# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1317652809 -7200
# Node ID a96c307da5101c610fed1ca5fe877f220071d29e
# Parent  e78cd03b0308c3ba5737ba9821bf7272f45549ca
xenpaging: use p2m->get_entry() in p2m_mem_paging functions

Use p2m->get_entry() in the p2m_mem_paging functions. This preserves the
p2m_access type when the gfn is updated with set_p2m_entry().
It is also preparation for the locking fixes in a subsequent patch.
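
For illustration, the access-preserving pattern the patch applies looks
roughly like this (a minimal sketch of the intended call sequence, not a
literal excerpt; the helper name fix_paging_entry is hypothetical):

    /* Sketch: query the current entry, then write it back with a new
     * paging type while keeping the p2m_access_t already set for the
     * gfn.  Helper name and placement are illustrative only. */
    static void fix_paging_entry(struct domain *d, unsigned long gfn,
                                 p2m_type_t new_type)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);
        p2m_type_t p2mt;
        p2m_access_t a;
        mfn_t mfn;

        /* p2m->get_entry() also returns the per-gfn access ... */
        mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);

        p2m_lock(p2m);
        /* ... which is passed back instead of p2m->default_access, so
         * nominate/evict/populate no longer reset the access type. */
        set_p2m_entry(p2m, gfn, mfn, 0, new_type, a);
        audit_p2m(p2m, 1);
        p2m_unlock(p2m);
    }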

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r e78cd03b0308 -r a96c307da510 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -671,10 +671,11 @@ int p2m_mem_paging_nominate(struct domai
     struct page_info *page;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
     int ret;
 
-    mfn = gfn_to_mfn(p2m->domain, gfn, &p2mt);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
@@ -701,7 +702,7 @@ int p2m_mem_paging_nominate(struct domai
 
     /* Fix p2m entry */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -715,11 +716,12 @@ int p2m_mem_paging_evict(struct domain *
 {
     struct page_info *page;
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Get mfn */
-    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( unlikely(!mfn_valid(mfn)) )
         return -EINVAL;
 
@@ -738,8 +740,7 @@ int p2m_mem_paging_evict(struct domain *
 
     /* Remove mapping from p2m table */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                  p2m_ram_paged, p2m->default_access);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_ram_paged, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -775,6 +776,7 @@ void p2m_mem_paging_populate(struct doma
     struct vcpu *v = current;
     mem_event_request_t req;
     p2m_type_t p2mt;
+    p2m_access_t a;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
@@ -787,12 +789,12 @@ void p2m_mem_paging_populate(struct doma
     /* Fix p2m mapping */
     /* XXX: It seems inefficient to have this here, as it's only needed
      *      in one case (ept guest accessing paging out page) */
-    gfn_to_mfn(d, gfn, &p2mt);
+    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
         set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                      p2m_ram_paging_in_start, p2m->default_access);
+                      p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
     }
@@ -821,8 +823,11 @@ void p2m_mem_paging_populate(struct doma
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
 {
     struct page_info *page;
+    p2m_type_t p2mt;
+    p2m_access_t a;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
+    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     /* Get a free page */
     page = alloc_domheap_page(p2m->domain, 0);
     if ( unlikely(page == NULL) )
@@ -830,7 +835,7 @@ int p2m_mem_paging_prep(struct domain *d
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
+    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -844,6 +849,7 @@ void p2m_mem_paging_resume(struct domain
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     mem_event_response_t rsp;
     p2m_type_t p2mt;
+    p2m_access_t a;
     mfn_t mfn;
 
     /* Pull the response off the ring */
@@ -852,9 +858,9 @@ void p2m_mem_paging_resume(struct domain
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
     {
-        mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
+        mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
         p2m_lock(p2m);
-        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
+        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a);
         set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);


* [PATCH 2 of 4] xenpaging: fix locking in p2m_mem_paging functions
  2011-10-03 14:43 [PATCH 0 of 4] xenpaging fixes for xen-unstable Olaf Hering
  2011-10-03 14:43 ` [PATCH 1 of 4] xenpaging: use p2m->get_entry() in p2m_mem_paging functions Olaf Hering
@ 2011-10-03 14:43 ` Olaf Hering
  2011-10-03 14:43 ` [PATCH 3 of 4] xenpaging: remove confusing comment from p2m_mem_paging_populate Olaf Hering
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Olaf Hering @ 2011-10-03 14:43 UTC (permalink / raw)
  To: xen-devel

# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1317652810 -7200
# Node ID 6bf1aa780b9ff40c50cfd00cfa8796ccc76286ee
# Parent  a96c307da5101c610fed1ca5fe877f220071d29e
xenpaging: fix locking in p2m_mem_paging functions

As suggested by <hongkaixing@huawei.com>, query and adjust the p2mt
under the p2m_lock to prevent races with PoD.
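
In outline, the change widens the locked region from just the update to
the whole query-and-check-and-update sequence (sketch only, with the
error handling and audit calls elided):

    /* Before: the lookup and type checks ran without the lock, so a
     * concurrent PoD or paging operation could change the entry
     * between the check and the update. */
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
    /* ... mfn/p2mt checks here, unlocked ... */
    p2m_lock(p2m);
    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
    p2m_unlock(p2m);

    /* After: lookup, checks and update all run under the p2m lock,
     * and every exit path releases it via a common out: label. */
    p2m_lock(p2m);
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
    /* ... mfn/p2mt checks here, still under the lock ... */
    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
    p2m_unlock(p2m);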

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r a96c307da510 -r 6bf1aa780b9f xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -675,6 +675,8 @@ int p2m_mem_paging_nominate(struct domai
     mfn_t mfn;
     int ret;
 
+    p2m_lock(p2m);
+
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
     /* Check if mfn is valid */
@@ -701,14 +703,12 @@ int p2m_mem_paging_nominate(struct domai
         goto out;
 
     /* Fix p2m entry */
-    p2m_lock(p2m);
     set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
     audit_p2m(p2m, 1);
-    p2m_unlock(p2m);
-
     ret = 0;
 
  out:
+    p2m_unlock(p2m);
     return ret;
 }
 
@@ -719,30 +719,31 @@ int p2m_mem_paging_evict(struct domain *
     p2m_access_t a;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int ret = -EINVAL;
+
+    p2m_lock(p2m);
 
     /* Get mfn */
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( unlikely(!mfn_valid(mfn)) )
-        return -EINVAL;
+        goto out;
 
     if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
          (p2mt == p2m_ram_paging_in_start) )
-        return -EINVAL;
+        goto out;
 
     /* Get the page so it doesn't get modified under Xen's feet */
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, d)) )
-        return -EINVAL;
+        goto out;
 
     /* Decrement guest domain's ref count of the page */
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
 
     /* Remove mapping from p2m table */
-    p2m_lock(p2m);
     set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_ram_paged, a);
     audit_p2m(p2m, 1);
-    p2m_unlock(p2m);
 
     /* Put the page back so it gets freed */
     put_page(page);
@@ -750,7 +751,11 @@ int p2m_mem_paging_evict(struct domain *
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
-    return 0;
+    ret = 0;
+
+ out:
+    p2m_unlock(p2m);
+    return ret;
 }
 
 void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
@@ -789,15 +794,15 @@ void p2m_mem_paging_populate(struct doma
     /* Fix p2m mapping */
     /* XXX: It seems inefficient to have this here, as it's only needed
      *      in one case (ept guest accessing paging out page) */
+    p2m_lock(p2m);
     p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( p2mt == p2m_ram_paged )
     {
-        p2m_lock(p2m);
         set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
                       p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
-        p2m_unlock(p2m);
     }
+    p2m_unlock(p2m);
 
     /* Pause domain */
     if ( v->domain->domain_id == d->domain_id )
@@ -826,22 +831,28 @@ int p2m_mem_paging_prep(struct domain *d
     p2m_type_t p2mt;
     p2m_access_t a;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int ret = -ENOMEM;
+
+    p2m_lock(p2m);
 
     p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+
     /* Get a free page */
     page = alloc_domheap_page(p2m->domain, 0);
     if ( unlikely(page == NULL) )
-        return -ENOMEM;
+        goto out;
 
     /* Fix p2m mapping */
-    p2m_lock(p2m);
     set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
-    p2m_unlock(p2m);
 
     atomic_dec(&d->paged_pages);
 
-    return 0;
+    ret = 0;
+
+ out:
+    p2m_unlock(p2m);
+    return ret;
 }
 
 void p2m_mem_paging_resume(struct domain *d)
@@ -858,8 +869,8 @@ void p2m_mem_paging_resume(struct domain
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
     {
+        p2m_lock(p2m);
         mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
-        p2m_lock(p2m);
         set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a);
         set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
         audit_p2m(p2m, 1);


* [PATCH 3 of 4] xenpaging: remove confusing comment from p2m_mem_paging_populate
  2011-10-03 14:43 [PATCH 0 of 4] xenpaging fixes for xen-unstable Olaf Hering
  2011-10-03 14:43 ` [PATCH 1 of 4] xenpaging: use p2m->get_entry() in p2m_mem_paging functions Olaf Hering
  2011-10-03 14:43 ` [PATCH 2 of 4] xenpaging: fix locking " Olaf Hering
@ 2011-10-03 14:43 ` Olaf Hering
  2011-10-03 14:43 ` [PATCH 4 of 4] xenpaging: handle paged pages in p2m_pod_decrease_reservation Olaf Hering
  2011-10-06 11:48 ` [PATCH 0 of 4] xenpaging fixes for xen-unstable Tim Deegan
  4 siblings, 0 replies; 7+ messages in thread
From: Olaf Hering @ 2011-10-03 14:43 UTC (permalink / raw)
  To: xen-devel

# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1317652811 -7200
# Node ID 13872c432c3807e0f977d9c1311801179807ece2
# Parent  6bf1aa780b9ff40c50cfd00cfa8796ccc76286ee
xenpaging: remove confusing comment from p2m_mem_paging_populate

Currently there is no way to avoid the double check of the p2mt because
p2m_mem_paging_populate() is called from many places without the
p2m_lock held. Upcoming changes will move the function into
gfn_to_mfn(); at that point its interface can be changed and the extra
p2m_lock/get_entry can be removed.
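
For context, a typical caller looks roughly like this (a hypothetical
call site; the function name handle_guest_access is made up for the
sketch):

    /* The caller queries the type without holding the p2m lock, so by
     * the time p2m_mem_paging_populate() runs the entry may already
     * have changed.  That is why populate has to take p2m_lock() and
     * re-query the type itself before touching the entry. */
    static int handle_guest_access(struct domain *d, unsigned long gfn)
    {
        p2m_type_t p2mt;

        gfn_to_mfn(d, gfn, &p2mt);           /* unlocked lookup */
        if ( p2m_is_paging(p2mt) )
        {
            p2m_mem_paging_populate(d, gfn); /* re-checks under the lock */
            return -EAGAIN;                  /* retry once paged back in */
        }
        return 0;
    }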

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r 6bf1aa780b9f -r 13872c432c38 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -792,8 +792,6 @@ void p2m_mem_paging_populate(struct doma
     req.type = MEM_EVENT_TYPE_PAGING;
 
     /* Fix p2m mapping */
-    /* XXX: It seems inefficient to have this here, as it's only needed
-     *      in one case (ept guest accessing paging out page) */
     p2m_lock(p2m);
     p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     if ( p2mt == p2m_ram_paged )


* [PATCH 4 of 4] xenpaging: handle paged pages in p2m_pod_decrease_reservation
  2011-10-03 14:43 [PATCH 0 of 4] xenpaging fixes for xen-unstable Olaf Hering
                   ` (2 preceding siblings ...)
  2011-10-03 14:43 ` [PATCH 3 of 4] xenpaging: remove confusing comment from p2m_mem_paging_populate Olaf Hering
@ 2011-10-03 14:43 ` Olaf Hering
  2011-10-04 16:12   ` George Dunlap
  2011-10-06 11:48 ` [PATCH 0 of 4] xenpaging fixes for xen-unstable Tim Deegan
  4 siblings, 1 reply; 7+ messages in thread
From: Olaf Hering @ 2011-10-03 14:43 UTC (permalink / raw)
  To: xen-devel

# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1317652812 -7200
# Node ID b05ede64aaf5f5090fdb844c3a58f1f92d9b3588
# Parent  13872c432c3807e0f977d9c1311801179807ece2
xenpaging: handle paged pages in p2m_pod_decrease_reservation

As suggested by <hongkaixing@huawei.com>, handle paged pages in PoD code.

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r 13872c432c38 -r b05ede64aaf5 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -567,6 +567,21 @@ p2m_pod_decrease_reservation(struct doma
             BUG_ON(p2m->pod.entry_count < 0);
             pod--;
         }
+        else if ( steal_for_cache && p2m_is_paging(t) )
+        {
+            struct page_info *page;
+            /* alloc a new page to compensate the pod list */
+            page = alloc_domheap_page(d, 0);
+            if ( !page )
+                goto out_entry_check;
+            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+            p2m_mem_paging_drop_page(d, gpfn+i);
+            p2m_pod_cache_add(p2m, page, 0);
+            steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
+            nonpod--;
+            ram--;
+        }
+        /* for other ram types */
         else if ( steal_for_cache && p2m_is_ram(t) )
         {
             struct page_info *page;


* Re: [PATCH 4 of 4] xenpaging: handle paged pages in p2m_pod_decrease_reservation
  2011-10-03 14:43 ` [PATCH 4 of 4] xenpaging: handle paged pages in p2m_pod_decrease_reservation Olaf Hering
@ 2011-10-04 16:12   ` George Dunlap
  0 siblings, 0 replies; 7+ messages in thread
From: George Dunlap @ 2011-10-04 16:12 UTC (permalink / raw)
  To: Olaf Hering; +Cc: xen-devel

On Mon, Oct 3, 2011 at 3:43 PM, Olaf Hering <olaf@aepfle.de> wrote:
> # HG changeset patch
> # User Olaf Hering <olaf@aepfle.de>
> # Date 1317652812 -7200
> # Node ID b05ede64aaf5f5090fdb844c3a58f1f92d9b3588
> # Parent  13872c432c3807e0f977d9c1311801179807ece2
> xenpaging: handle paged pages in p2m_pod_decrease_reservation
>
> As suggested by <hongkaixing@huawei.com>, handle paged pages in PoD code.
>
> Signed-off-by: Olaf Hering <olaf@aepfle.de>
>
> diff -r 13872c432c38 -r b05ede64aaf5 xen/arch/x86/mm/p2m-pod.c
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -567,6 +567,21 @@ p2m_pod_decrease_reservation(struct doma
>             BUG_ON(p2m->pod.entry_count < 0);
>             pod--;
>         }
> +        else if ( steal_for_cache && p2m_is_paging(t) )
> +        {
> +            struct page_info *page;
> +            /* alloc a new page to compensate the pod list */

This can't be right.  The whole point of "populate on demand" was to
pre-allocate a fixed amount of memory and not have to allocate any
more.  What happens if this allocation fails?

It seems like a better thing to do might be this:  If we get a request
to swap out a page, and we still have PoD entries present, we "swap
out" that page as a zero page.

Hmm -- this will take some careful thought...
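
A very rough sketch of that alternative, purely for illustration (the
names mirror the surrounding code, but nothing here is a worked-out
design -- as noted above, it needs careful thought):

    /* Speculative: if the pager nominates a gfn that is still an
     * unpopulated PoD entry, there is no backing mfn to write out.
     * The entry could go straight to p2m_ram_paged and be treated as
     * an all-zero page when it is later paged back in, so no extra
     * allocation is needed to keep the PoD cache balanced. */
    if ( p2mt == p2m_populate_on_demand )
    {
        set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_ram_paged, a);
        atomic_inc(&d->paged_pages);
        /* no page contents to save: zero-filled on page-in */
    }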

> +            page = alloc_domheap_page(d, 0);
> +            if ( !page )
> +                goto out_entry_check;
> +            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
> +            p2m_mem_paging_drop_page(d, gpfn+i);
> +            p2m_pod_cache_add(p2m, page, 0);
> +            steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
> +            nonpod--;
> +            ram--;
> +        }
> +        /* for other ram types */
>         else if ( steal_for_cache && p2m_is_ram(t) )
>         {
>             struct page_info *page;
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>


* Re: [PATCH 0 of 4] xenpaging fixes for xen-unstable
  2011-10-03 14:43 [PATCH 0 of 4] xenpaging fixes for xen-unstable Olaf Hering
                   ` (3 preceding siblings ...)
  2011-10-03 14:43 ` [PATCH 4 of 4] xenpaging: handle paged pages in p2m_pod_decrease_reservation Olaf Hering
@ 2011-10-06 11:48 ` Tim Deegan
  4 siblings, 0 replies; 7+ messages in thread
From: Tim Deegan @ 2011-10-06 11:48 UTC (permalink / raw)
  To: Olaf Hering; +Cc: xen-devel

At 16:43 +0200 on 03 Oct (1317660233), Olaf Hering wrote:
> 
> The following series changes the p2m_mem_paging* functions to modify the p2mt
> under the p2m_lock, and there is a change to improve PoD handling.
> 
> Please review and apply.

I've applied patches 1-3; leaving 4 until the relationship between
paging, sharing, ballooning and PoD is properly understood.

Cheers, 

Tim.

