* [PATCH 0/2] x86/HVM: feature checking improvements
@ 2016-09-02 10:17 Jan Beulich
  2016-09-02 10:21 ` [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES Jan Beulich
  2016-09-02 10:21 ` [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling Jan Beulich
  0 siblings, 2 replies; 9+ messages in thread
From: Jan Beulich @ 2016-09-02 10:17 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima

1: VMX: correct feature checks for MPX and XSAVES
2: x86/HVM: adjust feature checking in MSR intercept handling

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES
  2016-09-02 10:17 [PATCH 0/2] x86/HVM: feature checking improvements Jan Beulich
@ 2016-09-02 10:21 ` Jan Beulich
  2016-09-05 15:11   ` Andrew Cooper
  2016-09-07  5:29   ` Tian, Kevin
  2016-09-02 10:21 ` [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling Jan Beulich
  1 sibling, 2 replies; 9+ messages in thread
From: Jan Beulich @ 2016-09-02 10:21 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima


Their VMCS fields aren't tied to the respective base CPU feature flags
but instead to VMX specific ones.

Note that while the VMCS GUEST_BNDCFGS field exists if either of the
two respective features is available, MPX continues to get exposed to
guests only with both features present.

Also add the so far missing handling of
- GUEST_BNDCFGS in construct_vmcs()
- MSR_IA32_BNDCFGS in vmx_msr_{read,write}_intercept()
and mirror the extra correctness checks during MSR write to
vmx_load_msr().
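
For reference, the write-side validity check (applied in
vmx_msr_write_intercept() and mirrored into vmx_load_msr()) amounts to
the standalone sketch below; is_canonical_address() is reduced here to
the common 48-bit virtual address case, not Xen's actual helper:

    #include <stdbool.h>
    #include <stdint.h>

    #define IA32_BNDCFGS_ENABLE    0x00000001 /* EN: MPX enabled */
    #define IA32_BNDCFGS_PRESERVE  0x00000002 /* BNDPRESERVE */
    #define IA32_BNDCFGS_RESERVED  0x00000ffc /* bits 2-11: must be zero */

    /* Simplified stand-in for is_canonical_address(): bit 47 must be
     * sign-extended through bit 63. */
    static bool is_canonical(uint64_t addr)
    {
        return ((int64_t)(addr << 16) >> 16) == (int64_t)addr;
    }

    /* A BNDCFGS write is accepted iff the bound directory base is
     * canonical and no reserved bit is set; anything else #GPs. */
    static bool bndcfgs_write_ok(uint64_t val)
    {
        return is_canonical(val) && !(val & IA32_BNDCFGS_RESERVED);
    }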

Reported-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>

--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -168,8 +168,7 @@ static void __init calculate_hvm_feature
      */
     if ( cpu_has_vmx )
     {
-        if ( !(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
-             !(vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS) )
+        if ( !cpu_has_vmx_mpx )
             __clear_bit(X86_FEATURE_MPX, hvm_featureset);
 
         if ( !cpu_has_vmx_xsaves )
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1261,6 +1261,8 @@ static int construct_vmcs(struct vcpu *v
         __vmwrite(HOST_PAT, host_pat);
         __vmwrite(GUEST_PAT, guest_pat);
     }
+    if ( cpu_has_vmx_mpx )
+        __vmwrite(GUEST_BNDCFGS, 0);
     if ( cpu_has_vmx_xsaves )
         __vmwrite(XSS_EXIT_BITMAP, 0);
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -788,14 +788,15 @@ static int vmx_load_vmcs_ctxt(struct vcp
 
 static unsigned int __init vmx_init_msr(void)
 {
-    return !!cpu_has_mpx + !!cpu_has_xsaves;
+    return (cpu_has_mpx && cpu_has_vmx_mpx) +
+           (cpu_has_xsaves && cpu_has_vmx_xsaves);
 }
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     vmx_vmcs_enter(v);
 
-    if ( cpu_has_mpx )
+    if ( cpu_has_mpx && cpu_has_vmx_mpx )
     {
         __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
         if ( ctxt->msr[ctxt->count].val )
@@ -804,7 +805,7 @@ static void vmx_save_msr(struct vcpu *v,
 
     vmx_vmcs_exit(v);
 
-    if ( cpu_has_xsaves )
+    if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
     {
         ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
         if ( ctxt->msr[ctxt->count].val )
@@ -824,13 +825,15 @@ static int vmx_load_msr(struct vcpu *v,
         switch ( ctxt->msr[i].index )
         {
         case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx )
+            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
+                 is_canonical_address(ctxt->msr[i].val) &&
+                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
                 __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
             else if ( ctxt->msr[i].val )
                 err = -ENXIO;
             break;
         case MSR_IA32_XSS:
-            if ( cpu_has_xsaves )
+            if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
                 v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
             else
                 err = -ENXIO;
@@ -2643,6 +2646,11 @@ static int vmx_msr_read_intercept(unsign
     case MSR_IA32_DEBUGCTLMSR:
         __vmread(GUEST_IA32_DEBUGCTL, msr_content);
         break;
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx )
+            goto gp_fault;
+        __vmread(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
         if ( !nvmx_msr_read_intercept(msr, msr_content) )
@@ -2869,6 +2877,13 @@ static int vmx_msr_write_intercept(unsig
 
         break;
     }
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+             !is_canonical_address(msr_content) ||
+             (msr_content & IA32_BNDCFGS_RESERVED) )
+            goto gp_fault;
+        __vmwrite(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
         if ( !nvmx_msr_write_intercept(msr, msr_content) )
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -375,6 +375,9 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
 #define cpu_has_vmx_pml \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_mpx \
+    ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
+     (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
 #define cpu_has_vmx_xsaves \
     (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
 #define cpu_has_vmx_tsc_scaling \
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -56,7 +56,10 @@
 #define MSR_IA32_DS_AREA		0x00000600
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
 
-#define MSR_IA32_BNDCFGS		0x00000D90
+#define MSR_IA32_BNDCFGS		0x00000d90
+#define IA32_BNDCFGS_ENABLE		0x00000001
+#define IA32_BNDCFGS_PRESERVE		0x00000002
+#define IA32_BNDCFGS_RESERVED		0x00000ffc
 
 #define MSR_IA32_XSS			0x00000da0
 




* [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling
  2016-09-02 10:17 [PATCH 0/2] x86/HVM: feature checking improvements Jan Beulich
  2016-09-02 10:21 ` [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES Jan Beulich
@ 2016-09-02 10:21 ` Jan Beulich
  2016-09-05 15:20   ` Andrew Cooper
  2016-09-07  5:36   ` Tian, Kevin
  1 sibling, 2 replies; 9+ messages in thread
From: Jan Beulich @ 2016-09-02 10:21 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima


Consistently consult hvm_cpuid(). With that, BNDCFGS gets better
handled outside of VMX specific code, just like XSS. Don't needlessly
check for MTRR support when the MSR being accessed clearly is not an
MTRR one.
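
For illustration, the hvm_funcs indirection this relies on boils down
to the following distilled sketch (reduced types, not Xen's actual
declarations): the hooks are installed only when both cpu_has_mpx and
cpu_has_vmx_mpx are true, so common code can treat an absent hook as
"MPX not available" without any VMX-specific knowledge.

    #include <stdbool.h>
    #include <stdint.h>

    struct vcpu;

    /* Left NULL unless the CPU and VMX both support MPX. */
    static struct {
        bool (*get_guest_bndcfgs)(struct vcpu *, uint64_t *);
        bool (*set_guest_bndcfgs)(struct vcpu *, uint64_t);
    } hvm_funcs;

    /* Common code: a NULL hook makes the access fail (and hence #GP
     * at the MSR intercept call sites), with no VMX-specific check. */
    static bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val)
    {
        return hvm_funcs.get_guest_bndcfgs &&
               hvm_funcs.get_guest_bndcfgs(v, val);
    }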

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -309,6 +309,14 @@ int hvm_set_guest_pat(struct vcpu *v, u6
     return 1;
 }
 
+bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val)
+{
+    return hvm_funcs.set_guest_bndcfgs &&
+           is_canonical_address(val) &&
+           !(val & IA32_BNDCFGS_RESERVED) &&
+           hvm_funcs.set_guest_bndcfgs(v, val);
+}
+
 /*
  * Get the ratio to scale host TSC frequency to gtsc_khz. zero will be
  * returned if TSC scaling is unavailable or ratio cannot be handled
@@ -3628,28 +3636,30 @@ int hvm_msr_read_intercept(unsigned int
 {
     struct vcpu *v = current;
     uint64_t *var_range_base, *fixed_range_base;
-    bool_t mtrr;
-    unsigned int edx, index;
+    bool mtrr = false;
     int ret = X86EMUL_OKAY;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
     fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
 
-    hvm_cpuid(1, NULL, NULL, NULL, &edx);
-    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
+    if ( msr == MSR_MTRRcap ||
+         (msr >= MSR_IA32_MTRR_PHYSBASE(0) && msr <= MSR_MTRRdefType) )
+    {
+        unsigned int edx;
+
+        hvm_cpuid(1, NULL, NULL, NULL, &edx);
+        if ( edx & cpufeat_mask(X86_FEATURE_MTRR) )
+            mtrr = true;
+    }
 
     switch ( msr )
     {
+        unsigned int eax, ebx, ecx, index;
+
     case MSR_EFER:
         *msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
-    case MSR_IA32_XSS:
-        if ( !cpu_has_xsaves )
-            goto gp_fault;
-        *msr_content = v->arch.hvm_vcpu.msr_xss;
-        break;
-
     case MSR_IA32_TSC:
         *msr_content = _hvm_rdtsc_intercept();
         break;
@@ -3715,6 +3725,22 @@ int hvm_msr_read_intercept(unsigned int
         *msr_content = var_range_base[index];
         break;
 
+    case MSR_IA32_XSS:
+        ecx = 1;
+        hvm_cpuid(XSTATE_CPUID, &eax, NULL, &ecx, NULL);
+        if ( !(eax & cpufeat_mask(X86_FEATURE_XSAVES)) )
+            goto gp_fault;
+        *msr_content = v->arch.hvm_vcpu.msr_xss;
+        break;
+
+    case MSR_IA32_BNDCFGS:
+        ecx = 0;
+        hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
+        if ( !(ebx & cpufeat_mask(X86_FEATURE_MPX)) ||
+             !hvm_get_guest_bndcfgs(v, msr_content) )
+            goto gp_fault;
+        break;
+
     case MSR_K8_ENABLE_C1E:
     case MSR_AMD64_NB_CFG:
          /*
@@ -3751,15 +3777,20 @@ int hvm_msr_write_intercept(unsigned int
                             bool_t may_defer)
 {
     struct vcpu *v = current;
-    bool_t mtrr;
-    unsigned int edx, index;
+    bool mtrr = false;
     int ret = X86EMUL_OKAY;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
 
-    hvm_cpuid(1, NULL, NULL, NULL, &edx);
-    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
+    if ( msr >= MSR_IA32_MTRR_PHYSBASE(0) && msr <= MSR_MTRRdefType )
+    {
+        unsigned int edx;
+
+        hvm_cpuid(1, NULL, NULL, NULL, &edx);
+        if ( edx & cpufeat_mask(X86_FEATURE_MTRR) )
+            mtrr = true;
+    }
 
     if ( may_defer && unlikely(monitored_msr(v->domain, msr)) )
     {
@@ -3776,18 +3807,13 @@ int hvm_msr_write_intercept(unsigned int
 
     switch ( msr )
     {
+        unsigned int eax, ebx, ecx, index;
+
     case MSR_EFER:
         if ( hvm_set_efer(msr_content) )
            return X86EMUL_EXCEPTION;
         break;
 
-    case MSR_IA32_XSS:
-        /* No XSS features currently supported for guests. */
-        if ( !cpu_has_xsaves || msr_content != 0 )
-            goto gp_fault;
-        v->arch.hvm_vcpu.msr_xss = msr_content;
-        break;
-
     case MSR_IA32_TSC:
         hvm_set_guest_tsc(v, msr_content);
         break;
@@ -3824,9 +3850,8 @@ int hvm_msr_write_intercept(unsigned int
         break;
 
     case MSR_MTRRcap:
-        if ( !mtrr )
-            goto gp_fault;
         goto gp_fault;
+
     case MSR_MTRRdefType:
         if ( !mtrr )
             goto gp_fault;
@@ -3866,6 +3891,23 @@ int hvm_msr_write_intercept(unsigned int
             goto gp_fault;
         break;
 
+    case MSR_IA32_XSS:
+        ecx = 1;
+        hvm_cpuid(XSTATE_CPUID, &eax, NULL, &ecx, NULL);
+        /* No XSS features currently supported for guests. */
+        if ( !(eax & cpufeat_mask(X86_FEATURE_XSAVES)) || msr_content != 0 )
+            goto gp_fault;
+        v->arch.hvm_vcpu.msr_xss = msr_content;
+        break;
+
+    case MSR_IA32_BNDCFGS:
+        ecx = 0;
+        hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
+        if ( !(ebx & cpufeat_mask(X86_FEATURE_MPX)) ||
+             !hvm_set_guest_bndcfgs(v, msr_content) )
+            goto gp_fault;
+        break;
+
     case MSR_AMD64_NB_CFG:
         /* ignore the write */
         break;
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1224,6 +1224,28 @@ static int vmx_get_guest_pat(struct vcpu
     return 1;
 }
 
+static bool vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
+{
+    ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_BNDCFGS, val);
+    vmx_vmcs_exit(v);
+
+    return true;
+}
+
+static bool vmx_get_guest_bndcfgs(struct vcpu *v, u64 *val)
+{
+    ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
+
+    vmx_vmcs_enter(v);
+    __vmread(GUEST_BNDCFGS, val);
+    vmx_vmcs_exit(v);
+
+    return true;
+}
+
 static void vmx_handle_cd(struct vcpu *v, unsigned long value)
 {
     if ( !paging_mode_hap(v->domain) )
@@ -2323,6 +2345,12 @@ const struct hvm_function_table * __init
     if ( cpu_has_vmx_tsc_scaling )
         vmx_function_table.tsc_scaling.ratio_frac_bits = 48;
 
+    if ( cpu_has_mpx && cpu_has_vmx_mpx )
+    {
+        vmx_function_table.set_guest_bndcfgs = vmx_set_guest_bndcfgs;
+        vmx_function_table.get_guest_bndcfgs = vmx_get_guest_bndcfgs;
+    }
+
     setup_vmcs_dump();
 
     return &vmx_function_table;
@@ -2646,11 +2674,6 @@ static int vmx_msr_read_intercept(unsign
     case MSR_IA32_DEBUGCTLMSR:
         __vmread(GUEST_IA32_DEBUGCTL, msr_content);
         break;
-    case MSR_IA32_BNDCFGS:
-        if ( !cpu_has_mpx || !cpu_has_vmx_mpx )
-            goto gp_fault;
-        __vmread(GUEST_BNDCFGS, msr_content);
-        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
         if ( !nvmx_msr_read_intercept(msr, msr_content) )
@@ -2877,13 +2900,6 @@ static int vmx_msr_write_intercept(unsig
 
         break;
     }
-    case MSR_IA32_BNDCFGS:
-        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
-             !is_canonical_address(msr_content) ||
-             (msr_content & IA32_BNDCFGS_RESERVED) )
-            goto gp_fault;
-        __vmwrite(GUEST_BNDCFGS, msr_content);
-        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
         if ( !nvmx_msr_write_intercept(msr, msr_content) )
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -147,6 +147,9 @@ struct hvm_function_table {
     int  (*get_guest_pat)(struct vcpu *v, u64 *);
     int  (*set_guest_pat)(struct vcpu *v, u64);
 
+    bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
+    bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
+
     void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
 
     void (*inject_trap)(const struct hvm_trap *trap);
@@ -383,6 +386,13 @@ static inline unsigned long hvm_get_shad
     return hvm_funcs.get_shadow_gs_base(v);
 }
 
+static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
+{
+    return hvm_funcs.get_guest_bndcfgs &&
+           hvm_funcs.get_guest_bndcfgs(v, val);
+}
+
+bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
 
 #define has_hvm_params(d) \
     ((d)->arch.hvm_domain.params != NULL)




* Re: [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES
  2016-09-02 10:21 ` [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES Jan Beulich
@ 2016-09-05 15:11   ` Andrew Cooper
  2016-09-07  5:29   ` Tian, Kevin
  1 sibling, 0 replies; 9+ messages in thread
From: Andrew Cooper @ 2016-09-05 15:11 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Kevin Tian, Jun Nakajima



On 02/09/16 11:21, Jan Beulich wrote:
> Their VMCS fields aren't tied to the respective base CPU feature flags
> but instead to VMX specific ones.
>
> Note that while the VMCS GUEST_BNDCFGS field exists if either of the
> two respective features is available, MPX continues to get exposed to
> guests only with both features present.
>
> Also add the so far missing handling of
> - GUEST_BNDCFGS in construct_vmcs()
> - MSR_IA32_BNDCFGS in vmx_msr_{read,write}_intercept()
> and mirror the extra correctness checks during MSR write to
> vmx_load_msr().
>
> Reported-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Tested-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling
  2016-09-02 10:21 ` [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling Jan Beulich
@ 2016-09-05 15:20   ` Andrew Cooper
  2016-09-07  5:36   ` Tian, Kevin
  1 sibling, 0 replies; 9+ messages in thread
From: Andrew Cooper @ 2016-09-05 15:20 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Kevin Tian, Jun Nakajima


[-- Attachment #1.1: Type: text/plain, Size: 460 bytes --]

On 02/09/16 11:21, Jan Beulich wrote:
> Consistently consult hvm_cpuid(). With that, BNDCFGS gets better
> handled outside of VMX specific code, just like XSS. Don't needlessly
> check for MTRR support when the MSR being accessed clearly is not an
> MTRR one.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

:( Yet more overhead from extra hvm_cpuid() calls.  I really need to get
on with fixing it.

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES
  2016-09-02 10:21 ` [PATCH 1/2] VMX: correct feature checks for MPX and XSAVES Jan Beulich
  2016-09-05 15:11   ` Andrew Cooper
@ 2016-09-07  5:29   ` Tian, Kevin
  1 sibling, 0 replies; 9+ messages in thread
From: Tian, Kevin @ 2016-09-07  5:29 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Nakajima, Jun

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Friday, September 02, 2016 6:21 PM
> 
> Their VMCS fields aren't tied to the respective base CPU feature flags
> but instead to VMX specific ones.
> 
> Note that while the VMCS GUEST_BNDCFGS field exists if either of the
> two respective features is available, MPX continues to get exposed to
> guests only with both features present.
> 
> Also add the so far missing handling of
> - GUEST_BNDCFGS in construct_vmcs()
> - MSR_IA32_BNDCFGS in vmx_msr_{read,write}_intercept()
> and mirror the extra correctness checks during MSR write to
> vmx_load_msr().
> 
> Reported-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Tested-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
> 

Acked-by: Kevin Tian <kevin.tian@intel.com>


* Re: [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling
  2016-09-02 10:21 ` [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling Jan Beulich
  2016-09-05 15:20   ` Andrew Cooper
@ 2016-09-07  5:36   ` Tian, Kevin
  2016-09-07  7:58     ` Jan Beulich
  1 sibling, 1 reply; 9+ messages in thread
From: Tian, Kevin @ 2016-09-07  5:36 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Nakajima, Jun

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Friday, September 02, 2016 6:22 PM
> 
> Consistently consult hvm_cpuid(). With that, BNDCFGS gets better
> handled outside of VMX specific code, just like XSS. Don't needlessly
> check for MTRR support when the MSR being accessed clearly is not an
> MTRR one.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

A nice cleanup. Just one comment:

> @@ -3824,9 +3850,8 @@ int hvm_msr_write_intercept(unsigned int
>          break;
> 
>      case MSR_MTRRcap:
> -        if ( !mtrr )
> -            goto gp_fault;
>          goto gp_fault;
> +

what's the point of removing the above mtrr check?

Thanks
Kevin


* Re: [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling
  2016-09-07  5:36   ` Tian, Kevin
@ 2016-09-07  7:58     ` Jan Beulich
  2016-09-07  8:28       ` Tian, Kevin
  0 siblings, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2016-09-07  7:58 UTC (permalink / raw)
  To: Kevin Tian; +Cc: Andrew Cooper, Jun Nakajima, xen-devel

>>> On 07.09.16 at 07:36, <kevin.tian@intel.com> wrote:
>>  From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: Friday, September 02, 2016 6:22 PM
>> 
>> Consistently consult hvm_cpuid(). With that, BNDCFGS gets better
>> handled outside of VMX specific code, just like XSS. Don't needlessly
>> check for MTRR support when the MSR being accessed clearly is not an
>> MTRR one.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> 
> 
> A nice cleanup. Just one comment:
> 
>> @@ -3824,9 +3850,8 @@ int hvm_msr_write_intercept(unsigned int
>>          break;
>> 
>>      case MSR_MTRRcap:
>> -        if ( !mtrr )
>> -            goto gp_fault;
>>          goto gp_fault;
>> +
> 
> what's the point of removing the above mtrr check?

Isn't that obvious? No matter what value "mtrr" has, we
"goto gp_fault".

Jan



* Re: [PATCH 2/2] x86/HVM: adjust feature checking in MSR intercept handling
  2016-09-07  7:58     ` Jan Beulich
@ 2016-09-07  8:28       ` Tian, Kevin
  0 siblings, 0 replies; 9+ messages in thread
From: Tian, Kevin @ 2016-09-07  8:28 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Andrew Cooper, Nakajima, Jun, xen-devel

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Wednesday, September 07, 2016 3:59 PM
> 
> >>> On 07.09.16 at 07:36, <kevin.tian@intel.com> wrote:
> >>  From: Jan Beulich [mailto:JBeulich@suse.com]
> >> Sent: Friday, September 02, 2016 6:22 PM
> >>
> >> Consistently consult hvm_cpuid(). With that, BNDCFGS gets better
> >> handled outside of VMX specific code, just like XSS. Don't needlessly
> >> check for MTRR support when the MSR being accessed clearly is not an
> >> MTRR one.
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >>
> >
> > A nice cleanup. Just one comment:
> >
> >> @@ -3824,9 +3850,8 @@ int hvm_msr_write_intercept(unsigned int
> >>          break;
> >>
> >>      case MSR_MTRRcap:
> >> -        if ( !mtrr )
> >> -            goto gp_fault;
> >>          goto gp_fault;
> >> +
> >
> > what's the point of removing the above mtrr check?
> 
> Isn't that obvious: No matter what value "mtrr" has, we
> "goto gp_fault".
> 

You are right. Sorry, I simply looked at the same condition checks for
the other MTRR registers and didn't think about the actual meaning here.

Acked-by: Kevin Tian <kevin.tian@intel.com>

Thanks
Kevin

