* [PATCH v3 1/2] xen/x86: ensure copying runstate/time to L1 rather than L2
From: Haozhong Zhang @ 2017-02-27  3:26 UTC
  To: xen-devel; +Cc: Andrew Cooper, Jan Beulich, Haozhong Zhang

For an HVM domain, if a vcpu is in nested guest mode,
__raw_copy_to_guest(), __copy_to_guest() and __copy_field_to_guest()
used by update_runstate_area() and update_secondary_system_time() will
copy data to the L2 guest rather than the L1 guest.

This commit temporarily clears the nested guest flag before all guest
copies in update_runstate_area() and update_secondary_system_time(),
and restores the flag after those guest copy operations.

The flag clear/restore is combined with the existing
smap_policy_change(), which is renamed to update_guest_memory_policy().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Changes since v2:
 * Combine the fix with the existing smap_policy_change(), which is renamed to
   update_guest_memory_policy().
---
 xen/arch/x86/domain.c        | 39 +++++++++++++++++++++++++++++++--------
 xen/arch/x86/time.c          |  9 +++++----
 xen/include/asm-x86/domain.h | 10 ++++++++--
 3 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7d3071e..0bf1909 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -50,6 +50,7 @@
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
 #include <asm/hvm/hvm.h>
+#include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/viridian.h>
 #include <asm/debugreg.h>
@@ -200,12 +201,33 @@ void dump_pageframe_info(struct domain *d)
     spin_unlock(&d->page_alloc_lock);
 }
 
-smap_check_policy_t smap_policy_change(struct vcpu *v,
-    smap_check_policy_t new_policy)
+void update_guest_memory_policy(struct vcpu *v,
+                                struct guest_memory_policy *policy)
 {
-    smap_check_policy_t old_policy = v->arch.smap_check_policy;
-    v->arch.smap_check_policy = new_policy;
-    return old_policy;
+    smap_check_policy_t old_smap_policy = v->arch.smap_check_policy;
+    bool old_guest_mode = nestedhvm_is_n2(v);
+    bool new_guest_mode = policy->nested_guest_mode;
+
+    v->arch.smap_check_policy = policy->smap_policy;
+    policy->smap_policy = old_smap_policy;
+
+    /*
+     * When 'v' is in nested guest mode, all guest copy
+     * functions/macros which eventually call paging_gva_to_gfn()
+     * transfer data to/from the L2 guest. If the copy is intended
+     * for the L1 guest, we must first clear the nested guest flag
+     * (by setting policy->nested_guest_mode to false) before the
+     * copy and then restore the nested guest flag (by setting
+     * policy->nested_guest_mode to true) after the copy.
+     */
+    if ( unlikely(old_guest_mode != new_guest_mode) )
+    {
+        if ( new_guest_mode )
+            nestedhvm_vcpu_enter_guestmode(v);
+        else
+            nestedhvm_vcpu_exit_guestmode(v);
+        policy->nested_guest_mode = old_guest_mode;
+    }
 }
 
 #ifndef CONFIG_BIGMEM
@@ -1929,13 +1951,14 @@ static void paravirt_ctxt_switch_to(struct vcpu *v)
 bool_t update_runstate_area(struct vcpu *v)
 {
     bool_t rc;
-    smap_check_policy_t smap_policy;
+    struct guest_memory_policy policy =
+        { .smap_policy = SMAP_CHECK_ENABLED, .nested_guest_mode = false };
     void __user *guest_handle = NULL;
 
     if ( guest_handle_is_null(runstate_guest(v)) )
         return 1;
 
-    smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+    update_guest_memory_policy(v, &policy);
 
     if ( VM_ASSIST(v->domain, runstate_update_flag) )
     {
@@ -1969,7 +1992,7 @@ bool_t update_runstate_area(struct vcpu *v)
                             (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
     }
 
-    smap_policy_change(v, smap_policy);
+    update_guest_memory_policy(v, &policy);
 
     return rc;
 }
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 3ad2ab0..faa638b 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -991,17 +991,18 @@ bool_t update_secondary_system_time(struct vcpu *v,
                                     struct vcpu_time_info *u)
 {
     XEN_GUEST_HANDLE(vcpu_time_info_t) user_u = v->arch.time_info_guest;
-    smap_check_policy_t saved_policy;
+    struct guest_memory_policy policy =
+        { .smap_policy = SMAP_CHECK_ENABLED, .nested_guest_mode = false };
 
     if ( guest_handle_is_null(user_u) )
         return 1;
 
-    saved_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+    update_guest_memory_policy(v, &policy);
 
     /* 1. Update userspace version. */
     if ( __copy_field_to_guest(user_u, u, version) == sizeof(u->version) )
     {
-        smap_policy_change(v, saved_policy);
+        update_guest_memory_policy(v, &policy);
         return 0;
     }
     wmb();
@@ -1012,7 +1013,7 @@ bool_t update_secondary_system_time(struct vcpu *v,
     u->version = version_update_end(u->version);
     __copy_field_to_guest(user_u, u, version);
 
-    smap_policy_change(v, saved_policy);
+    update_guest_memory_policy(v, &policy);
 
     return 1;
 }
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 2839a73..7b05c84 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -577,8 +577,14 @@ struct arch_vcpu
     } monitor;
 };
 
-smap_check_policy_t smap_policy_change(struct vcpu *v,
-                                       smap_check_policy_t new_policy);
+struct guest_memory_policy
+{
+    smap_check_policy_t smap_policy;
+    bool nested_guest_mode;
+};
+
+void update_guest_memory_policy(struct vcpu *v,
+                                struct guest_memory_policy *policy);
 
 /* Shorthands to improve code legibility. */
 #define hvm_vmx         hvm_vcpu.u.vmx
-- 
2.10.1
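
For reference, here is a minimal sketch of the save/restore pattern the new
helper is meant for. The function copy_something_to_l1() is hypothetical and
only for illustration; it assumes nothing beyond the interfaces introduced by
this patch (struct guest_memory_policy and update_guest_memory_policy()).

    /* Hypothetical caller sketch, not part of the patch. */
    static void copy_something_to_l1(struct vcpu *v)
    {
        /* Request SMAP checks enabled and copies directed at L1, not L2. */
        struct guest_memory_policy policy =
            { .smap_policy = SMAP_CHECK_ENABLED, .nested_guest_mode = false };

        /* Install the requested policy; the previous settings are saved
         * back into 'policy'. */
        update_guest_memory_policy(v, &policy);

        /*
         * Any __copy_to_guest()/__copy_field_to_guest() issued here now
         * targets the L1 guest, even if 'v' was in nested guest mode.
         */

        /* Restore the saved SMAP policy and, if it was changed above,
         * re-enter nested guest mode. */
        update_guest_memory_policy(v, &policy);
    }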


* [PATCH v3 2/2] x86/hvm: check HAP before enabling nested VMX
From: Haozhong Zhang @ 2017-02-27  3:26 UTC
  To: xen-devel; +Cc: Andrew Cooper, Jan Beulich, Haozhong Zhang

The current implementation of nested VMX cannot work without HAP.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Changes since v2:
 * Add the check for nested VMX by dropping the cpu_has_svm condition from the
   existing HAP check.
---
 xen/arch/x86/hvm/hvm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6621d62..f4666be 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4123,7 +4123,7 @@ static int hvmop_set_param(
          * Remove the check below once we have
          * shadow-on-shadow.
          */
-        if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
+        if ( !paging_mode_hap(d) && a.value )
             rc = -EINVAL;
         if ( a.value &&
              d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
-- 
2.10.1
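
For context, here is a hypothetical toolstack-side sketch of what the changed
check affects. It assumes the libxc xc_hvm_param_set() interface; the helper
try_enable_nested() is made up for illustration and is not part of the patch.

    #include <xenctrl.h>

    /* Hypothetical helper, for illustration only. */
    static int try_enable_nested(xc_interface *xch, uint32_t domid)
    {
        /*
         * With this patch applied, setting HVM_PARAM_NESTEDHVM to a
         * non-zero value is rejected (EINVAL) for any domain that does
         * not use HAP, on Intel hosts as well as AMD ones.
         */
        return xc_hvm_param_set(xch, domid, HVM_PARAM_NESTEDHVM, 1);
    }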


* Re: [PATCH v3 2/2] x86/hvm: check HAP before enabling nested VMX
From: Jan Beulich @ 2017-02-28 15:11 UTC
  To: Haozhong Zhang; +Cc: Andrew Cooper, xen-devel

>>> On 27.02.17 at 04:26, <haozhong.zhang@intel.com> wrote:
> The current implementation of nested VMX cannot work without HAP.
> 
> Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



* Re: [PATCH v3 1/2] xen/x86: ensure copying runstate/time to L1 rather than L2
From: Jan Beulich @ 2017-02-28 15:15 UTC
  To: Haozhong Zhang; +Cc: Andrew Cooper, xen-devel

>>> On 27.02.17 at 04:26, <haozhong.zhang@intel.com> wrote:
> For an HVM domain, if a vcpu is in nested guest mode,
> __raw_copy_to_guest(), __copy_to_guest() and __copy_field_to_guest()
> used by update_runstate_area() and update_secondary_system_time() will
> copy data to the L2 guest rather than the L1 guest.
> 
> This commit temporarily clears the nested guest flag before all guest
> copies in update_runstate_area() and update_secondary_system_time(),
> and restores the flag after those guest copy operations.
> 
> The flag clear/restore is combined with the existing
> smap_policy_change(), which is renamed to update_guest_memory_policy().
> 
> Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>


