* [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code
@ 2016-08-03 19:16 Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 1/3] altp2m cleanup work Paul Lai
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Paul Lai @ 2016-08-03 19:16 UTC (permalink / raw)
  To: xen-devel; +Cc: dunlapg, ravi.sahita, jbeulich

This series incorporates comments on the v1 altp2m cleanup code; see:
  https://lists.xenproject.org/archives/html/xen-devel/2016-06/msg03223.html
  https://lists.xenproject.org/archives/html/xen-devel/2016-06/msg03465.html
  https://lists.xenproject.org/archives/html/xen-devel/2016-06/msg03467.html

The older comments that motivated this cleanup effort are at the following URLs:
  https://lists.xenproject.org/archives/html/xen-devel/2015-07/msg04323.html
  https://lists.xenproject.org/archives/html/xen-devel/2015-07/msg04454.html
  https://lists.xenproject.org/archives/html/xen-devel/2015-07/msg04530.html

Paul Lai (3):
  altp2m cleanup work
  Move altp2m specific functions to altp2m files.
  Making altp2m struct dynamically allocated.


 xen/arch/x86/hvm/hvm.c            |  55 +++++++++-----------
 xen/arch/x86/hvm/vmx/vmx.c        |   2 +-
 xen/arch/x86/mm/altp2m.c          |  45 +++++++++++++++++
 xen/arch/x86/mm/hap/hap.c         |  35 ++-----------
 xen/arch/x86/mm/mm-locks.h        |   4 +-
 xen/arch/x86/mm/p2m-ept.c         |  39 ++++++++++++++
 xen/arch/x86/mm/p2m.c             | 104 +++++++++++++-------------------------
 xen/include/asm-x86/altp2m.h      |  11 +++-
 xen/include/asm-x86/domain.h      |   5 +-
 xen/include/asm-x86/hvm/hvm.h     |  19 +++++--
 xen/include/asm-x86/hvm/vmx/vmx.h |   3 ++
 xen/include/asm-x86/p2m.h         |  18 ++++---
 12 files changed, 194 insertions(+), 146 deletions(-)

-- 
2.7.4



* [PATCH v2 Altp2m cleanup 1/3] altp2m cleanup work
  2016-08-03 19:16 [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Paul Lai
@ 2016-08-03 19:16 ` Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 2/3] Move altp2m specific functions to altp2m files Paul Lai
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Paul Lai @ 2016-08-03 19:16 UTC (permalink / raw)
  To: xen-devel; +Cc: dunlapg, ravi.sahita, jbeulich

Indent goto labels by one space.
Inline the altp2m functions declared in hvm.h.
Validate HVMOP altp2m subops with an explicit switch rather than a min/max
range check, with a default case defining the behavior for unknown subops.

Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
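Note for reviewers (not part of the applied patch): the two do_altp2m_op()
hunks below combine into the following subop validation flow, sketched here
for quick reading:

    if ( a.pad1 || a.pad2 ||
         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
        return -EINVAL;

    switch ( a.cmd )
    {
    case HVMOP_altp2m_get_domain_state:
    case HVMOP_altp2m_set_domain_state:
    case HVMOP_altp2m_vcpu_enable_notify:
    case HVMOP_altp2m_create_p2m:
    case HVMOP_altp2m_destroy_p2m:
    case HVMOP_altp2m_switch_p2m:
    case HVMOP_altp2m_set_mem_access:
    case HVMOP_altp2m_change_gfn:
        break;
    default:
        return -ENOSYS;    /* reject unknown subops before taking any locks */
    }
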
 xen/arch/x86/hvm/hvm.c        | 47 ++++++++++++++++++++-----------------------
 xen/include/asm-x86/hvm/hvm.h | 19 +++++++++++++++---
 2 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 22f045e..69daa29 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1926,11 +1926,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      * Otherwise, this is an error condition. */
     rc = fall_through;
 
-out_put_gfn:
+ out_put_gfn:
     __put_gfn(p2m, gfn);
     if ( ap2m_active )
         __put_gfn(hostp2m, gfn);
-out:
+ out:
     /* All of these are delayed until we exit, since we might 
      * sleep on event ring wait queues, and we must not hold
      * locks in such circumstance */
@@ -5207,12 +5207,25 @@ static int do_altp2m_op(
         return -EFAULT;
 
     if ( a.pad1 || a.pad2 ||
-         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ||
-         (a.cmd < HVMOP_altp2m_get_domain_state) ||
-         (a.cmd > HVMOP_altp2m_change_gfn) )
+         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
         return -EINVAL;
 
-    d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_get_domain_state:
+    case HVMOP_altp2m_set_domain_state:
+    case HVMOP_altp2m_vcpu_enable_notify:
+    case HVMOP_altp2m_create_p2m:
+    case HVMOP_altp2m_destroy_p2m:
+    case HVMOP_altp2m_switch_p2m:
+    case HVMOP_altp2m_set_mem_access:
+    case HVMOP_altp2m_change_gfn:
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
         rcu_lock_domain_by_any_id(a.domain) : rcu_lock_current_domain();
 
     if ( d == NULL )
@@ -5329,6 +5342,9 @@ static int do_altp2m_op(
             rc = p2m_change_altp2m_gfn(d, a.u.change_gfn.view,
                     _gfn(a.u.change_gfn.old_gfn),
                     _gfn(a.u.change_gfn.new_gfn));
+        break;
+    default:
+        rc = -EINVAL;
     }
 
  out:
@@ -5816,25 +5832,6 @@ void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
 }
 
-void altp2m_vcpu_update_p2m(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
-}
-
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
-        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
-}
-
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
-        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
-    return 0;
-}
-
 int hvm_set_mode(struct vcpu *v, int mode)
 {
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index f486ee9..231c921 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -589,13 +589,26 @@ static inline bool_t hvm_altp2m_supported(void)
 }
 
 /* updates the current hardware p2m */
-void altp2m_vcpu_update_p2m(struct vcpu *v);
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_p2m )
+        hvm_funcs.altp2m_vcpu_update_p2m(v);
+}
 
 /* updates VMCS fields related to VMFUNC and #VE */
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
+static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
+        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+}
 
 /* emulates #VE */
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+static inline bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
+        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
+    return 0;
+}
 
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
-- 
2.7.4



* [PATCH v2 Altp2m cleanup 2/3] Move altp2m specific functions to altp2m files.
  2016-08-03 19:16 [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 1/3] altp2m cleanup work Paul Lai
@ 2016-08-03 19:16 ` Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 3/3] Making altp2m struct dynamically allocated Paul Lai
  2016-08-03 21:20 ` [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Lai, Paul C
  3 siblings, 0 replies; 5+ messages in thread
From: Paul Lai @ 2016-08-03 19:16 UTC (permalink / raw)
  To: xen-devel; +Cc: dunlapg, ravi.sahita, jbeulich

Move altp2m specific functions to altp2m files.  This makes the code
a little easier to read.

Also move EPT-specific code to EPT files, as requested in:
  https://lists.xenproject.org/archives/html/xen-devel/2015-07/msg04323.html

Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
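Note for reviewers (not part of the applied patch): with this change the HAP
code no longer open-codes the alternate-p2m setup and teardown; hap_enable()
and hap_final_teardown() reduce to roughly the following, with the helpers
living in xen/arch/x86/mm/altp2m.c:

    if ( hvm_altp2m_supported() )
    {
        rv = hvm_altp2m_init(d);
        if ( rv != 0 )
            goto out;
    }

    /* ... and on the teardown side ... */
    if ( hvm_altp2m_supported() )
        hvm_altp2m_teardown(d);

The EPT-only pieces, p2m_init_altp2m_ept_helper() and
p2m_find_altp2m_by_eptp(), move from p2m.c to p2m-ept.c, with their
prototypes now exposed via asm-x86/hvm/vmx/vmx.h.
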
 xen/arch/x86/mm/altp2m.c          | 45 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/hap/hap.c         | 35 +++++-------------------------
 xen/arch/x86/mm/p2m-ept.c         | 39 +++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/p2m.c             | 45 +++------------------------------------
 xen/include/asm-x86/altp2m.h      |  4 +++-
 xen/include/asm-x86/hvm/vmx/vmx.h |  3 +++
 xen/include/asm-x86/p2m.h         |  9 +++-----
 7 files changed, 101 insertions(+), 79 deletions(-)

diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 10605c8..ec812dd 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -17,6 +17,7 @@
 
 #include <asm/hvm/support.h>
 #include <asm/hvm/hvm.h>
+#include <asm/domain.h>
 #include <asm/p2m.h>
 #include <asm/altp2m.h>
 
@@ -65,6 +66,50 @@ altp2m_vcpu_destroy(struct vcpu *v)
         vcpu_unpause(v);
 }
 
+int
+hvm_altp2m_init(struct domain *d)
+{
+    int rc = 0;
+    unsigned int i = 0;
+
+    /* Init alternate p2m data. */
+    if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    for ( i = 0; i < MAX_EPTP; i++ )
+        d->arch.altp2m_eptp[i] = INVALID_MFN;
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+        if ( rc != 0 )
+            goto out;
+    }
+
+    d->arch.altp2m_active = 0;
+ out:
+    return rc;
+}
+
+void
+hvm_altp2m_teardown(struct domain *d)
+{
+    unsigned int i = 0;
+    d->arch.altp2m_active = 0;
+
+    if ( d->arch.altp2m_eptp )
+    {
+        free_xenheap_page(d->arch.altp2m_eptp);
+        d->arch.altp2m_eptp = NULL;
+    }
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+        p2m_teardown(d->arch.altp2m_p2m[i]);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 9c2cd49..07833b7 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -37,6 +37,7 @@
 #include <asm/hap.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
+#include <asm/altp2m.h>
 #include <asm/domain.h>
 #include <xen/numa.h>
 #include <asm/hvm/nestedhvm.h>
@@ -501,24 +502,9 @@ int hap_enable(struct domain *d, u32 mode)
 
     if ( hvm_altp2m_supported() )
     {
-        /* Init alternate p2m data */
-        if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
-        {
-            rv = -ENOMEM;
-            goto out;
-        }
-
-        for ( i = 0; i < MAX_EPTP; i++ )
-            d->arch.altp2m_eptp[i] = INVALID_MFN;
-
-        for ( i = 0; i < MAX_ALTP2M; i++ )
-        {
-            rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
-            if ( rv != 0 )
-               goto out;
-        }
-
-        d->arch.altp2m_active = 0;
+        rv = hvm_altp2m_init(d);
+        if ( rv != 0 )
+            goto out;
     }
 
     /* Now let other users see the new mode */
@@ -534,18 +520,7 @@ void hap_final_teardown(struct domain *d)
     unsigned int i;
 
     if ( hvm_altp2m_supported() )
-    {
-        d->arch.altp2m_active = 0;
-
-        if ( d->arch.altp2m_eptp )
-        {
-            free_xenheap_page(d->arch.altp2m_eptp);
-            d->arch.altp2m_eptp = NULL;
-        }
-
-        for ( i = 0; i < MAX_ALTP2M; i++ )
-            p2m_teardown(d->arch.altp2m_p2m[i]);
-    }
+        hvm_altp2m_teardown(d);
 
     /* Destroy nestedp2m's first */
     for (i = 0; i < MAX_NESTEDP2M; i++) {
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 7166c71..b68806d 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1329,6 +1329,45 @@ void setup_ept_dump(void)
     register_keyhandler('D', ept_dump_p2m_table, "dump VT-x EPT tables", 0);
 }
 
+void p2m_init_altp2m_ept_helper(struct domain *d, unsigned int i)
+{
+    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+    struct ept_data *ept;
+
+    p2m->min_remapped_gfn = INVALID_GFN;
+    p2m->max_remapped_gfn = 0;
+    ept = &p2m->ept;
+    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+}
+
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
+{
+    struct p2m_domain *p2m;
+    struct ept_data *ept;
+    unsigned int i;
+
+    altp2m_list_lock(d);
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+            continue;
+
+        p2m = d->arch.altp2m_p2m[i];
+        ept = &p2m->ept;
+
+        if ( eptp == ept_get_eptp(ept) )
+            goto out;
+    }
+
+    i = INVALID_ALTP2M;
+
+ out:
+    altp2m_list_unlock(d);
+    return i;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 89462b2..b48cf4b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -196,8 +196,8 @@ static void p2m_teardown_altp2m(struct domain *d)
         if ( !d->arch.altp2m_p2m[i] )
             continue;
         p2m = d->arch.altp2m_p2m[i];
-        d->arch.altp2m_p2m[i] = NULL;
         p2m_free_one(p2m);
+        d->arch.altp2m_p2m[i] = NULL;
     }
 }
 
@@ -2270,33 +2270,6 @@ int unmap_mmio_regions(struct domain *d,
     return i == nr ? 0 : i ?: ret;
 }
 
-unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
-{
-    struct p2m_domain *p2m;
-    struct ept_data *ept;
-    unsigned int i;
-
-    altp2m_list_lock(d);
-
-    for ( i = 0; i < MAX_ALTP2M; i++ )
-    {
-        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
-            continue;
-
-        p2m = d->arch.altp2m_p2m[i];
-        ept = &p2m->ept;
-
-        if ( eptp == ept_get_eptp(ept) )
-            goto out;
-    }
-
-    i = INVALID_ALTP2M;
-
- out:
-    altp2m_list_unlock(d);
-    return i;
-}
-
 bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 {
     struct domain *d = v->domain;
@@ -2402,18 +2375,6 @@ void p2m_flush_altp2m(struct domain *d)
     altp2m_list_unlock(d);
 }
 
-static void p2m_init_altp2m_helper(struct domain *d, unsigned int i)
-{
-    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
-    struct ept_data *ept;
-
-    p2m->min_remapped_gfn = INVALID_GFN;
-    p2m->max_remapped_gfn = 0;
-    ept = &p2m->ept;
-    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
-}
-
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 {
     int rc = -EINVAL;
@@ -2425,7 +2386,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 
     if ( d->arch.altp2m_eptp[idx] == INVALID_MFN )
     {
-        p2m_init_altp2m_helper(d, idx);
+        p2m_init_altp2m_ept_helper(d, idx);
         rc = 0;
     }
 
@@ -2445,7 +2406,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
         if ( d->arch.altp2m_eptp[i] != INVALID_MFN )
             continue;
 
-        p2m_init_altp2m_helper(d, i);
+        p2m_init_altp2m_ept_helper(d, i);
         *idx = i;
         rc = 0;
 
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 64c7618..a236ed7 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -18,7 +18,6 @@
 #ifndef __ASM_X86_ALTP2M_H
 #define __ASM_X86_ALTP2M_H
 
-#include <xen/types.h>
 #include <xen/sched.h>         /* for struct vcpu, struct domain */
 #include <asm/hvm/vcpu.h>      /* for vcpu_altp2m */
 
@@ -38,4 +37,7 @@ static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
     return vcpu_altp2m(v).p2midx;
 }
 
+int hvm_altp2m_init(struct domain *d);
+void hvm_altp2m_teardown(struct domain *d);
+
 #endif /* __ASM_X86_ALTP2M_H */
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 359b2a9..5b495a2 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -556,6 +556,9 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
 void ept_walk_table(struct domain *d, unsigned long gfn);
 bool_t ept_handle_misconfig(uint64_t gpa);
 void setup_ept_dump(void);
+void p2m_init_altp2m_ept_helper(struct domain *d, unsigned int i);
+/* Locate an alternate p2m by its EPTP */
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
 
 void update_guest_eip(void);
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 65675a2..d7c8c12 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -23,8 +23,8 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _XEN_P2M_H
-#define _XEN_P2M_H
+#ifndef _XEN_ASM_X86_P2M_H
+#define _XEN_ASM_X86_P2M_H
 
 #include <xen/config.h>
 #include <xen/paging.h>
@@ -779,9 +779,6 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
     return v->domain->arch.altp2m_p2m[index];
 }
 
-/* Locate an alternate p2m by its EPTP */
-unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
-
 /* Switch alternate p2m for a single vcpu */
 bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
 
@@ -843,7 +840,7 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
     return flags;
 }
 
-#endif /* _XEN_P2M_H */
+#endif /* _XEN_ASM_X86_P2M_H */
 
 /*
  * Local variables:
-- 
2.7.4



* [PATCH v2 Altp2m cleanup 3/3] Making altp2m struct dynamically allocated.
  2016-08-03 19:16 [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 1/3] altp2m cleanup work Paul Lai
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 2/3] Move altp2m specific functions to altp2m files Paul Lai
@ 2016-08-03 19:16 ` Paul Lai
  2016-08-03 21:20 ` [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Lai, Paul C
  3 siblings, 0 replies; 5+ messages in thread
From: Paul Lai @ 2016-08-03 19:16 UTC (permalink / raw)
  To: xen-devel; +Cc: dunlapg, ravi.sahita, jbeulich

Make the per-domain altp2m state dynamically allocated, based on Ravi
Sahita's dynamically allocated altp2m structs.

Signed-off-by: Paul Lai <paul.c.lai@intel.com>
Reviewed-by: Ravi Sahita <ravi.sahita@intel.com>
---
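Note for reviewers (not part of the applied patch): the per-domain altp2m
fields move out of struct arch_domain and behind a single pointer; a condensed
view of the new layout and its allocation in p2m_init_altp2m() is:

    /* xen/include/asm-x86/p2m.h */
    struct altp2m_domain {
        bool_t altp2m_active;
        struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
        mm_lock_t altp2m_list_lock;
        uint64_t *altp2m_eptp;
    };

    /* xen/arch/x86/mm/p2m.c */
    d->arch.altp2m = xzalloc(struct altp2m_domain);
    if ( d->arch.altp2m == NULL )
        return -ENOMEM;

All former d->arch.altp2m_* accesses become d->arch.altp2m->altp2m_*, and
direct reads and writes of altp2m_active now go through altp2m_active(d) and
set_altp2m_active(d, ...).
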
 xen/arch/x86/hvm/hvm.c       |  8 +++---
 xen/arch/x86/hvm/vmx/vmx.c   |  2 +-
 xen/arch/x86/mm/altp2m.c     | 18 ++++++-------
 xen/arch/x86/mm/mm-locks.h   |  4 +--
 xen/arch/x86/mm/p2m-ept.c    |  8 +++---
 xen/arch/x86/mm/p2m.c        | 61 ++++++++++++++++++++++++--------------------
 xen/include/asm-x86/altp2m.h |  7 ++++-
 xen/include/asm-x86/domain.h |  5 +---
 xen/include/asm-x86/p2m.h    |  9 ++++++-
 9 files changed, 69 insertions(+), 53 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 69daa29..8a824f9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5239,7 +5239,7 @@ static int do_altp2m_op(
 
     if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
          (a.cmd != HVMOP_altp2m_set_domain_state) &&
-         !d->arch.altp2m_active )
+         !altp2m_active(d) )
     {
         rc = -EOPNOTSUPP;
         goto out;
@@ -5273,11 +5273,11 @@ static int do_altp2m_op(
             break;
         }
 
-        ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        ostate = altp2m_active(d);
+        set_altp2m_active(d, !!a.u.domain_state.state);
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( altp2m_active(d) != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
             for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 670d7dc..b522578 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2017,7 +2017,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
     {
         v->arch.hvm_vmx.secondary_exec_control |= mask;
         __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
-        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->altp2m_eptp));
 
         if ( cpu_has_vmx_virt_exceptions )
         {
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index ec812dd..23f995c 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -73,23 +73,23 @@ hvm_altp2m_init(struct domain *d)
     unsigned int i = 0;
 
     /* Init alternate p2m data. */
-    if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+    if ( (d->arch.altp2m->altp2m_eptp = alloc_xenheap_page()) == NULL )
     {
         rc = -ENOMEM;
         goto out;
     }
 
     for ( i = 0; i < MAX_EPTP; i++ )
-        d->arch.altp2m_eptp[i] = INVALID_MFN;
+        d->arch.altp2m->altp2m_eptp[i] = INVALID_MFN;
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+        rc = p2m_alloc_table(d->arch.altp2m->altp2m_p2m[i]);
         if ( rc != 0 )
            goto out;
     }
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
  out:
     return rc;
 }
@@ -98,16 +98,16 @@ void
 hvm_altp2m_teardown(struct domain *d)
 {
     unsigned int i = 0;
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
 
-    if ( d->arch.altp2m_eptp )
+    if ( d->arch.altp2m->altp2m_eptp )
     {
-        free_xenheap_page(d->arch.altp2m_eptp);
-        d->arch.altp2m_eptp = NULL;
+        free_xenheap_page(d->arch.altp2m->altp2m_eptp);
+        d->arch.altp2m->altp2m_eptp = NULL;
     }
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
-        p2m_teardown(d->arch.altp2m_p2m[i]);
+        p2m_teardown(d->arch.altp2m->altp2m_p2m[i]);
 }
 
 /*
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..4d17b0a 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -251,8 +251,8 @@ declare_mm_rwlock(p2m);
  */
 
 declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m->altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->altp2m_list_lock)
 
 /* P2M lock (per-altp2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index b68806d..3fa4923 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1331,14 +1331,14 @@ void setup_ept_dump(void)
 
 void p2m_init_altp2m_ept_helper(struct domain *d, unsigned int i)
 {
-    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+    struct p2m_domain *p2m = d->arch.altp2m->altp2m_p2m[i];
     struct ept_data *ept;
 
     p2m->min_remapped_gfn = INVALID_GFN;
     p2m->max_remapped_gfn = 0;
     ept = &p2m->ept;
     ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+    d->arch.altp2m->altp2m_eptp[i] = ept_get_eptp(ept);
 }
 
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1351,10 +1351,10 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         ept = &p2m->ept;
 
         if ( eptp == ept_get_eptp(ept) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b48cf4b..e207355 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -193,12 +193,15 @@ static void p2m_teardown_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( !d->arch.altp2m_p2m[i] )
+        if ( !d->arch.altp2m->altp2m_p2m[i] )
             continue;
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         p2m_free_one(p2m);
-        d->arch.altp2m_p2m[i] = NULL;
+        d->arch.altp2m->altp2m_p2m[i] = NULL;
     }
+
+    xfree(d->arch.altp2m);
+    d->arch.altp2m = NULL;
 }
 
 static int p2m_init_altp2m(struct domain *d)
@@ -206,10 +209,14 @@ static int p2m_init_altp2m(struct domain *d)
     unsigned int i;
     struct p2m_domain *p2m;
 
-    mm_lock_init(&d->arch.altp2m_list_lock);
+    d->arch.altp2m = xzalloc(struct altp2m_domain);
+    if ( d->arch.altp2m == NULL )
+        return -ENOMEM;
+
+    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        d->arch.altp2m->altp2m_p2m[i] = p2m = p2m_init_one(d);
         if ( p2m == NULL )
         {
             p2m_teardown_altp2m(d);
@@ -1838,10 +1845,10 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == INVALID_MFN )
+             d->arch.altp2m->altp2m_eptp[altp2m_idx] == INVALID_MFN )
             return -EINVAL;
 
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+        ap2m = d->arch.altp2m->altp2m_p2m[altp2m_idx];
     }
 
     switch ( access )
@@ -2280,7 +2287,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
         if ( idx != vcpu_altp2m(v).p2midx )
         {
@@ -2365,11 +2372,11 @@ void p2m_flush_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        p2m_flush_table(d->arch.altp2m->altp2m_p2m[i]);
         /* Uninit and reinit ept to force TLB shootdown */
-        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
-        ept_p2m_init(d->arch.altp2m_p2m[i]);
-        d->arch.altp2m_eptp[i] = INVALID_MFN;
+        ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[i]);
+        ept_p2m_init(d->arch.altp2m->altp2m_p2m[i]);
+        d->arch.altp2m->altp2m_eptp[i] = INVALID_MFN;
     }
 
     altp2m_list_unlock(d);
@@ -2384,7 +2391,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] == INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] == INVALID_MFN )
     {
         p2m_init_altp2m_ept_helper(d, idx);
         rc = 0;
@@ -2403,7 +2410,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] != INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] != INVALID_MFN )
             continue;
 
         p2m_init_altp2m_ept_helper(d, i);
@@ -2429,17 +2436,17 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
-        p2m = d->arch.altp2m_p2m[idx];
+        p2m = d->arch.altp2m->altp2m_p2m[idx];
 
         if ( !_atomic_read(p2m->active_vcpus) )
         {
-            p2m_flush_table(d->arch.altp2m_p2m[idx]);
+            p2m_flush_table(d->arch.altp2m->altp2m_p2m[idx]);
             /* Uninit and reinit ept to force TLB shootdown */
-            ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
-            ept_p2m_init(d->arch.altp2m_p2m[idx]);
-            d->arch.altp2m_eptp[idx] = INVALID_MFN;
+            ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[idx]);
+            ept_p2m_init(d->arch.altp2m->altp2m_p2m[idx]);
+            d->arch.altp2m->altp2m_eptp[idx] = INVALID_MFN;
             rc = 0;
         }
     }
@@ -2463,7 +2470,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
         for_each_vcpu( d, v )
             if ( idx != vcpu_altp2m(v).p2midx )
@@ -2494,11 +2501,11 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     unsigned int page_order;
     int rc = -EINVAL;
 
-    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m->altp2m_eptp[idx] == INVALID_MFN )
         return rc;
 
     hp2m = p2m_get_hostp2m(d);
-    ap2m = d->arch.altp2m_p2m[idx];
+    ap2m = d->arch.altp2m->altp2m_p2m[idx];
 
     p2m_lock(ap2m);
 
@@ -2589,10 +2596,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
 
         /* Check for a dropped page that may impact this altp2m */
@@ -2613,10 +2620,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
                 for ( i = 0; i < MAX_ALTP2M; i++ )
                 {
                     if ( i == last_reset_idx ||
-                         d->arch.altp2m_eptp[i] == INVALID_MFN )
+                         d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
                         continue;
 
-                    p2m = d->arch.altp2m_p2m[i];
+                    p2m = d->arch.altp2m->altp2m_p2m[i];
                     p2m_lock(p2m);
                     p2m_reset_altp2m(p2m);
                     p2m_unlock(p2m);
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index a236ed7..a2e6740 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -24,7 +24,12 @@
 /* Alternate p2m HVM on/off per domain */
 static inline bool_t altp2m_active(const struct domain *d)
 {
-    return d->arch.altp2m_active;
+    return d->arch.altp2m->altp2m_active;
+}
+
+static inline void set_altp2m_active(struct domain *d, bool_t v)
+{
+    d->arch.altp2m->altp2m_active = v;
 }
 
 /* Alternate p2m VCPU */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 783fa4f..5ae4975 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -320,10 +320,7 @@ struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
-    bool_t altp2m_active;
-    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
-    mm_lock_t altp2m_list_lock;
-    uint64_t *altp2m_eptp;
+    struct altp2m_domain *altp2m;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d7c8c12..f3f9710 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -338,6 +338,13 @@ struct p2m_domain {
     };
 };
 
+struct altp2m_domain {
+    bool_t altp2m_active;
+    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+    mm_lock_t altp2m_list_lock;
+    uint64_t *altp2m_eptp;
+};
+
 /* get host p2m table */
 #define p2m_get_hostp2m(d)      ((d)->arch.p2m)
 
@@ -776,7 +783,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 
     BUG_ON(index >= MAX_ALTP2M);
 
-    return v->domain->arch.altp2m_p2m[index];
+    return v->domain->arch.altp2m->altp2m_p2m[index];
 }
 
 /* Switch alternate p2m for a single vcpu */
-- 
2.7.4



* Re: [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code
  2016-08-03 19:16 [PATCH v2 Altp2m cleanup 0/3] Cleaning up altp2m code Paul Lai
                   ` (2 preceding siblings ...)
  2016-08-03 19:16 ` [PATCH v2 Altp2m cleanup 3/3] Making altp2m struct dynamically allocated Paul Lai
@ 2016-08-03 21:20 ` Lai, Paul C
  3 siblings, 0 replies; 5+ messages in thread
From: Lai, Paul C @ 2016-08-03 21:20 UTC (permalink / raw)
  To: Lai, Paul C, xen-devel; +Cc: dunlapg, Sahita, Ravi, jbeulich

Please disregard this submission for review.
I forgot to rebase onto the current master, so this series is against an out-of-date code base.

Apologies for the noise.

