* [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy
From: Tamas K Lengyel @ 2016-07-20 18:55 UTC
  To: xen-devel; +Cc: George Dunlap, Tamas K Lengyel, Jan Beulich, Andrew Cooper

Move the sharing locks above altp2m to avoid a locking-order violation
that crashes the hypervisor during unsharing operations when altp2m is
active.

Applying mem_access settings or remapping gfns in altp2m views will
automatically unshare the page if it was previously shared. Also,
disallow nominating pages for which pre-existing altp2m mem_access
settings or remappings are present. However, allow lazycopy to
populate altp2m views with shared entries, as unsharing will
automatically propagate the change to those entries in the altp2m
views as well.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>

v5: Allow only lazycopy to copy shared entries to altp2m views
    Use get_gfn_type_access for unsharing as get_entry doesn't do that
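
    Note (a sketch, not part of the patch): p2m->get_entry() is the raw
    table lookup and returns shared entries as-is; the query flags only
    drive PoD population there. The get_gfn_type_access() wrapper also
    takes the gfn lock and, with P2M_UNSHARE, calls into mem_sharing to
    give the domain a private copy first. Side by side, using the names
    from the p2m.c hunks below:

        /* Raw lookup: may still return a p2m_ram_shared entry. */
        mfn = hp2m->get_entry(hp2m, gfn_l, &t, &old_a,
                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);

        /* Locked wrapper: actually breaks sharing before returning. */
        mfn = get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
                                  P2M_ALLOC | P2M_UNSHARE, &page_order);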
---
 xen/arch/x86/mm/mem_sharing.c | 25 ++++++++++++++++++++++++-
 xen/arch/x86/mm/mm-locks.h    | 30 +++++++++++++++---------------
 xen/arch/x86/mm/p2m.c         | 12 +++++++-----
 3 files changed, 46 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a522423..3939cd0 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -33,6 +33,7 @@
 #include <asm/page.h>
 #include <asm/string.h>
 #include <asm/p2m.h>
+#include <asm/altp2m.h>
 #include <asm/atomic.h>
 #include <asm/event.h>
 #include <xsm/xsm.h>
@@ -828,20 +829,42 @@ int mem_sharing_nominate_page(struct domain *d,
                               int expected_refcnt,
                               shr_handle_t *phandle)
 {
+    struct p2m_domain *hp2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
     mfn_t mfn;
     struct page_info *page = NULL; /* gcc... */
     int ret;
 
     *phandle = 0UL;
 
-    mfn = get_gfn(d, gfn, &p2mt);
+    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
     if ( !mfn_valid(mfn) )
         goto out;
 
+    /* Check if there are mem_access/remapped altp2m entries for this page */
+    if ( altp2m_active(d) )
+    {
+        unsigned int i;
+        struct p2m_domain *ap2m;
+        mfn_t amfn;
+        p2m_access_t ap2ma;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+        {
+            ap2m = d->arch.altp2m_p2m[i];
+            if ( !ap2m )
+                continue;
+
+            amfn = get_gfn_type_access(ap2m, gfn, NULL, &ap2ma, 0, NULL);
+            if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
+                goto out;
+        }
+    }
+
     /* Return the handle if the page is already shared */
     if ( p2m_is_shared(p2mt) ) {
         struct page_info *pg = __grab_shared_page(mfn);
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..74fdfc1 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -242,6 +242,21 @@ declare_mm_lock(nestedp2m)
 
 declare_mm_rwlock(p2m);
 
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+        mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
+
 /* Alternate P2M list lock (per-domain)
  *
  * A per-domain lock that protects the list of alternate p2m's.
@@ -287,21 +302,6 @@ declare_mm_rwlock(altp2m);
 #define p2m_locked_by_me(p)   mm_write_locked_by_me(&(p)->lock)
 #define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
 
-/* Sharing per page lock
- *
- * This is an external lock, not represented by an mm_lock_t. The memory
- * sharing lock uses it to protect addition and removal of (gfn,domain)
- * tuples to a shared page. We enforce order here against the p2m lock,
- * which is taken after the page_lock to change the gfn's p2m entry.
- *
- * The lock is recursive because during share we lock two pages. */
-
-declare_mm_order_constraint(per_page_sharing)
-#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
-#define page_sharing_mm_post_lock(l, r) \
-        mm_enforce_order_lock_post_per_page_sharing((l), (r))
-#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
-
 /* PoD lock (per-p2m-table)
  * 
  * Protects private PoD data structs: entry and cache
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ff0cce8..812dbf6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1786,8 +1786,9 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
     /* Check host p2m if no valid entry in alternate */
     if ( !mfn_valid(mfn) )
     {
-        mfn = hp2m->get_entry(hp2m, gfn_l, &t, &old_a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+
+        mfn = get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
 
         rc = -ESRCH;
         if ( !mfn_valid(mfn) || t != p2m_ram_rw )
@@ -2363,7 +2364,7 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
         return 0;
 
     mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order);
+                              P2M_ALLOC, &page_order);
     __put_gfn(hp2m, gfn_x(gfn));
 
     if ( mfn_eq(mfn, INVALID_MFN) )
@@ -2562,8 +2563,8 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     /* Check host p2m if no valid entry in alternate */
     if ( !mfn_valid(mfn) )
     {
-        mfn = hp2m->get_entry(hp2m, gfn_x(old_gfn), &t, &a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+        mfn = get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
 
         if ( !mfn_valid(mfn) || t != p2m_ram_rw )
             goto out;
@@ -2588,6 +2589,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     if ( !mfn_valid(mfn) )
         mfn = hp2m->get_entry(hp2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
 
+    /* Note: currently it is not safe to remap to a shared entry */
     if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
         goto out;
 
-- 
2.8.1
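
For context (not part of the patch): the mm-locks.h block being moved is
what mem_sharing.c's page-lock helper hooks into. A sketch, paraphrased
from the 4.7-era source, so details may differ:

    static inline int mem_sharing_page_lock(struct page_info *pg)
    {
        int rc;
        pg_lock_data_t *pld = &(this_cpu(__pld));

        /* Record the intent to lock so the order checker sees it. */
        page_sharing_mm_pre_lock();
        rc = page_lock(pg);
        if ( rc )
        {
            preempt_disable();
            /* Commit the ordering bookkeeping for this CPU. */
            page_sharing_mm_post_lock(&pld->mm_unlock_level,
                                      &pld->recurse_count);
        }
        return rc;
    }

The order mm-locks.h enforces is positional: a lock declared earlier in
the file may only be taken before one declared later. Moving the
per-page sharing constraint above the altp2m locks therefore makes it
legal to take the sharing lock first, which is what fixes the crash
described in the commit message.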



* Re: [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy
From: George Dunlap @ 2016-07-25 15:58 UTC
  To: Tamas K Lengyel; +Cc: xen-devel, Jan Beulich, Andrew Cooper

On Wed, Jul 20, 2016 at 7:55 PM, Tamas K Lengyel
<tamas.lengyel@zentific.com> wrote:
> Move the sharing locks above altp2m to avoid a locking-order violation
> that crashes the hypervisor during unsharing operations when altp2m is
> active.
>
> Applying mem_access settings or remapping gfns in altp2m views will
> automatically unshare the page if it was previously shared. Also,
> disallow nominating pages for which pre-existing altp2m mem_access
> settings or remappings are present. However, allow lazycopy to
> populate altp2m views with shared entries, as unsharing will
> automatically propagate the change to those entries in the altp2m
> views as well.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>

This looks a lot better.  One comment...

> [...]
> @@ -828,20 +829,42 @@ int mem_sharing_nominate_page(struct domain *d,
>                                int expected_refcnt,
>                                shr_handle_t *phandle)
>  {
> +    struct p2m_domain *hp2m = p2m_get_hostp2m(d);
>      p2m_type_t p2mt;
> +    p2m_access_t p2ma;
>      mfn_t mfn;
>      struct page_info *page = NULL; /* gcc... */
>      int ret;
>
>      *phandle = 0UL;
>
> -    mfn = get_gfn(d, gfn, &p2mt);
> +    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
>
>      /* Check if mfn is valid */
>      ret = -EINVAL;
>      if ( !mfn_valid(mfn) )
>          goto out;
>
> +    /* Check if there are mem_access/remapped altp2m entries for this page */
> +    if ( altp2m_active(d) )
> +    {
> +        unsigned int i;
> +        struct p2m_domain *ap2m;
> +        mfn_t amfn;
> +        p2m_access_t ap2ma;
> +
> +        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        {
> +            ap2m = d->arch.altp2m_p2m[i];
> +            if ( !ap2m )
> +                continue;
> +
> +            amfn = get_gfn_type_access(ap2m, gfn, NULL, &ap2ma, 0, NULL);
> +            if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
> +                goto out;
> +        }

You need to altp2m_list_[un]lock(d) around this loop, don't you?

Everything else looks great, thanks.

 -George


* Re: [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy
From: George Dunlap @ 2016-07-25 16:01 UTC
  To: Tamas K Lengyel; +Cc: xen-devel, Jan Beulich, Andrew Cooper

On Wed, Jul 20, 2016 at 7:55 PM, Tamas K Lengyel
<tamas.lengyel@zentific.com> wrote:
> Move the sharing locks above altp2m to avoid a locking-order violation
> that crashes the hypervisor during unsharing operations when altp2m is
> active.
>
> Applying mem_access settings or remapping gfns in altp2m views will
> automatically unshare the page if it was previously shared. Also,
> disallow nominating pages for which pre-existing altp2m mem_access
> settings or remappings are present. However, allow lazycopy to
> populate altp2m views with shared entries, as unsharing will
> automatically propagate the change to those entries in the altp2m
> views as well.

Oh, one more thing; since you're probably respinning it anyway, could
you add the following (or something like it) to the changelog:

"While we're here, use the appropriate get_entry() wrappers."

Just to clue people in that the change is cosmetic.

Thanks.

> [...]

* Re: [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy
From: Tamas K Lengyel @ 2016-07-25 17:12 UTC
  To: George Dunlap; +Cc: xen-devel, Jan Beulich, Andrew Cooper

On Mon, Jul 25, 2016 at 10:01 AM, George Dunlap
<George.Dunlap@eu.citrix.com> wrote:
> On Wed, Jul 20, 2016 at 7:55 PM, Tamas K Lengyel
> <tamas.lengyel@zentific.com> wrote:
>> Move the sharing locks above altp2m to avoid a locking-order violation
>> that crashes the hypervisor during unsharing operations when altp2m is
>> active.
>>
>> Applying mem_access settings or remapping gfns in altp2m views will
>> automatically unshare the page if it was previously shared. Also,
>> disallow nominating pages for which pre-existing altp2m mem_access
>> settings or remappings are present. However, allow lazycopy to
>> populate altp2m views with shared entries, as unsharing will
>> automatically propagate the change to those entries in the altp2m
>> views as well.
>
> Oh, one more thing; since you're probably respinning it anyway, could
> you add the following (or something like it) to the changelog:
>
> "While we're here, use the appropriate get_entry() wrappers."
>
> Just to clue people in that the change is cosmetic.
>

Sure, no problem. Is the rest of the patch fine this way from your side?

Thanks,
Tamas


* Re: [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy
From: Tamas K Lengyel @ 2016-07-25 18:10 UTC
  To: George Dunlap; +Cc: xen-devel, Jan Beulich, Andrew Cooper

On Mon, Jul 25, 2016 at 9:58 AM, George Dunlap <george.dunlap@citrix.com> wrote:
> On Wed, Jul 20, 2016 at 7:55 PM, Tamas K Lengyel
> <tamas.lengyel@zentific.com> wrote:
>> [...]
>
> This looks a lot better.  One comment...
>
>> [...]
>> @@ -828,20 +829,42 @@ int mem_sharing_nominate_page(struct domain *d,
>>                                int expected_refcnt,
>>                                shr_handle_t *phandle)
>>  {
>> +    struct p2m_domain *hp2m = p2m_get_hostp2m(d);
>>      p2m_type_t p2mt;
>> +    p2m_access_t p2ma;
>>      mfn_t mfn;
>>      struct page_info *page = NULL; /* gcc... */
>>      int ret;
>>
>>      *phandle = 0UL;
>>
>> -    mfn = get_gfn(d, gfn, &p2mt);
>> +    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
>>
>>      /* Check if mfn is valid */
>>      ret = -EINVAL;
>>      if ( !mfn_valid(mfn) )
>>          goto out;
>>
>> +    /* Check if there are mem_access/remapped altp2m entries for this page */
>> +    if ( altp2m_active(d) )
>> +    {
>> +        unsigned int i;
>> +        struct p2m_domain *ap2m;
>> +        mfn_t amfn;
>> +        p2m_access_t ap2ma;
>> +
>> +        for ( i = 0; i < MAX_ALTP2M; i++ )
>> +        {
>> +            ap2m = d->arch.altp2m_p2m[i];
>> +            if ( !ap2m )
>> +                continue;
>> +
>> +            amfn = get_gfn_type_access(ap2m, gfn, NULL, &ap2ma, 0, NULL);
>> +            if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
>> +                goto out;
>> +        }
>
> You need to altp2m_list_[un]lock(d) around this loop, don't you?

Ah yes, that's right.
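
Something like this, I take it (a sketch only, untested; just wrapping
the existing loop with the altp2m list lock):

    altp2m_list_lock(d);

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        ap2m = d->arch.altp2m_p2m[i];
        if ( !ap2m )
            continue;

        amfn = get_gfn_type_access(ap2m, gfn, NULL, &ap2ma, 0, NULL);
        if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
        {
            altp2m_list_unlock(d);
            goto out;
        }
    }

    altp2m_list_unlock(d);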

>
> Everything else looks great, thanks.

Awesome!

Thanks,
Tamas

