* [PATCH v4 0/4] x86/HVM: miscellaneous improvements
@ 2013-09-23 10:04 Jan Beulich
  2013-09-23 10:10 ` [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs Jan Beulich
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 10:04 UTC (permalink / raw)
  To: xen-devel; +Cc: Yang Z Zhang, Eddie Dong, Jun Nakajima

The first and third patches are cleaned up versions of an earlier v3
submission by Yang.

1: Nested VMX: check VMX capability before read VMX related MSRs
2: VMX: clean up capability checks
3: Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
4: x86: make hvm_cpuid() tolerate NULL pointers

Signed-off-by: Jan Beulich <jbeulich@suse.com>


* [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs
  2013-09-23 10:04 [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
@ 2013-09-23 10:10 ` Jan Beulich
  2013-09-23 14:50   ` Boris Ostrovsky
  2013-09-23 10:10 ` [PATCH v4 2/4] VMX: clean up capability checks Jan Beulich
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 10:10 UTC (permalink / raw)
  To: xen-devel; +Cc: Yang Z Zhang, Eddie Dong, Jun Nakajima

VMX MSRs are available only when the CPU supports the VMX feature. In
addition, the VMX_TRUE* MSRs are available only when bit 55 of the
VMX_BASIC MSR is set.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>

Cleanup.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
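
For reference, the rule being enforced, as a minimal standalone sketch
(not Xen code; the helper name and its parameters are invented for
illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)

    /*
     * A VMX capability MSR may be exposed to the guest only if the
     * guest's CPUID.1:ECX advertises VMX (bit 5); the VMX_TRUE_* variants
     * additionally require bit 55 of the guest's VMX_BASIC to be set.
     */
    static bool vmx_msr_visible(uint32_t guest_cpuid1_ecx,
                                uint64_t guest_vmx_basic,
                                bool is_true_ctls_msr)
    {
        if ( !(guest_cpuid1_ecx & (1u << 5)) )   /* CPUID.1:ECX.VMX */
            return false;
        if ( is_true_ctls_msr &&
             !(guest_vmx_basic & VMX_BASIC_DEFAULT1_ZERO) )
            return false;
        return true;
    }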

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -78,6 +78,7 @@ static DEFINE_PER_CPU(struct list_head, 
 static DEFINE_PER_CPU(bool_t, vmxon);
 
 static u32 vmcs_revision_id __read_mostly;
+u64 __read_mostly vmx_basic_msr;
 
 static void __init vmx_display_features(void)
 {
@@ -301,6 +302,8 @@ static int vmx_init_vmcs_config(void)
         vmx_vmexit_control         = _vmx_vmexit_control;
         vmx_vmentry_control        = _vmx_vmentry_control;
         cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
+        vmx_basic_msr              = ((u64)vmx_basic_msr_high << 32) |
+                                     vmx_basic_msr_low;
         vmx_display_features();
     }
     else
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1813,12 +1813,33 @@ int nvmx_handle_invvpid(struct cpu_user_
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
+    unsigned int ecx, dummy;
     u64 data = 0, host_data = 0;
     int r = 1;
 
     if ( !nestedhvm_enabled(v->domain) )
         return 0;
 
+    /* VMX capability MSRs are available only when guest supports VMX. */
+    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &dummy);
+    if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
+        return 0;
+
+    /*
+     * Those MSRs are available only when bit 55 of
+     * MSR_IA32_VMX_BASIC is set.
+     */
+    switch ( msr )
+    {
+    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+        if ( !(vmx_basic_msr & VMX_BASIC_DEFAULT1_ZERO) )
+            return 0;
+        break;
+    }
+
     rdmsrl(msr, host_data);
 
     /*
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -284,6 +284,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
  */
 #define VMX_BASIC_DEFAULT1_ZERO		(1ULL << 55)
 
+extern u64 vmx_basic_msr;
+
 /* Guest interrupt status */
 #define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
 #define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8


* [PATCH v4 2/4] VMX: clean up capability checks
  2013-09-23 10:04 [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
  2013-09-23 10:10 ` [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs Jan Beulich
@ 2013-09-23 10:10 ` Jan Beulich
  2013-09-23 10:11 ` [PATCH v4 3/4] Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation Jan Beulich
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 10:10 UTC (permalink / raw)
  To: xen-devel; +Cc: Yang Z Zhang, Eddie Dong, Jun Nakajima

VMCS size validation on APs should check against the BP's size.

No need for a separate cpu_has_vmx_ins_outs_instr_info variable
anymore.

Use proper symbolic constants.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
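
One idiom used below deserves a note: dividing by (mask & -mask)
extracts a bit-field without an explicit shift, because mask & -mask
isolates the lowest set bit of the mask. A small standalone example of
the idiom (illustrative only, using a made-up register value):

    #include <assert.h>
    #include <stdint.h>

    #define VMX_BASIC_MEMORY_TYPE_MASK (0xfULL << 50)

    int main(void)
    {
        uint64_t vmx_basic = 6ULL << 50;  /* WB (6) in bits 53:50 */
        /* mask & -mask == 1ULL << 50, so dividing shifts the field down */
        uint64_t type = (vmx_basic & VMX_BASIC_MEMORY_TYPE_MASK) /
                        (VMX_BASIC_MEMORY_TYPE_MASK &
                         -VMX_BASIC_MEMORY_TYPE_MASK);
        assert(type == 6);
        return 0;
    }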

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -70,7 +70,6 @@ u32 vmx_secondary_exec_control __read_mo
 u32 vmx_vmexit_control __read_mostly;
 u32 vmx_vmentry_control __read_mostly;
 u64 vmx_ept_vpid_cap __read_mostly;
-bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, vmxon_region);
 static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
@@ -294,24 +293,33 @@ static int vmx_init_vmcs_config(void)
     if ( !vmx_pin_based_exec_control )
     {
         /* First time through. */
-        vmcs_revision_id = vmx_basic_msr_low;
+        vmcs_revision_id           = vmx_basic_msr_low & VMX_BASIC_REVISION_MASK;
         vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
         vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
         vmx_secondary_exec_control = _vmx_secondary_exec_control;
         vmx_ept_vpid_cap           = _vmx_ept_vpid_cap;
         vmx_vmexit_control         = _vmx_vmexit_control;
         vmx_vmentry_control        = _vmx_vmentry_control;
-        cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
         vmx_basic_msr              = ((u64)vmx_basic_msr_high << 32) |
                                      vmx_basic_msr_low;
         vmx_display_features();
+
+        /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
+        if ( (vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32)) >
+             PAGE_SIZE )
+        {
+            printk("VMX: CPU%d VMCS size is too big (%Lu bytes)\n",
+                   smp_processor_id(),
+                   vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32));
+            return -EINVAL;
+        }
     }
     else
     {
         /* Globals are already initialised: re-check them. */
         mismatch |= cap_check(
             "VMCS revision ID",
-            vmcs_revision_id, vmx_basic_msr_low);
+            vmcs_revision_id, vmx_basic_msr_low & VMX_BASIC_REVISION_MASK);
         mismatch |= cap_check(
             "Pin-Based Exec Control",
             vmx_pin_based_exec_control, _vmx_pin_based_exec_control);
@@ -331,13 +339,21 @@ static int vmx_init_vmcs_config(void)
             "EPT and VPID Capability",
             vmx_ept_vpid_cap, _vmx_ept_vpid_cap);
         if ( cpu_has_vmx_ins_outs_instr_info !=
-             !!(vmx_basic_msr_high & (1U<<22)) )
+             !!(vmx_basic_msr_high & (VMX_BASIC_INS_OUT_INFO >> 32)) )
         {
             printk("VMX INS/OUTS Instruction Info: saw %d expected %d\n",
-                   !!(vmx_basic_msr_high & (1U<<22)),
+                   !!(vmx_basic_msr_high & (VMX_BASIC_INS_OUT_INFO >> 32)),
                    cpu_has_vmx_ins_outs_instr_info);
             mismatch = 1;
         }
+        if ( (vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32)) !=
+             ((vmx_basic_msr & VMX_BASIC_VMCS_SIZE_MASK) >> 32) )
+        {
+            printk("VMX: CPU%d unexpected VMCS size %Lu\n",
+                   smp_processor_id(),
+                   vmx_basic_msr_high & (VMX_BASIC_VMCS_SIZE_MASK >> 32));
+            mismatch = 1;
+        }
         if ( mismatch )
         {
             printk("VMX: Capabilities fatally differ between CPU%d and CPU0\n",
@@ -346,16 +362,8 @@ static int vmx_init_vmcs_config(void)
         }
     }
 
-    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
-    if ( (vmx_basic_msr_high & 0x1fff) > PAGE_SIZE )
-    {
-        printk("VMX: CPU%d VMCS size is too big (%u bytes)\n",
-               smp_processor_id(), vmx_basic_msr_high & 0x1fff);
-        return -EINVAL;
-    }
-
     /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
-    if ( vmx_basic_msr_high & (1u<<16) )
+    if ( vmx_basic_msr_high & (VMX_BASIC_32BIT_ADDRESSES >> 32) )
     {
         printk("VMX: CPU%d limits VMX structure pointers to 32 bits\n",
                smp_processor_id());
@@ -363,10 +371,12 @@ static int vmx_init_vmcs_config(void)
     }
 
     /* Require Write-Back (WB) memory type for VMCS accesses. */
-    if ( ((vmx_basic_msr_high >> 18) & 15) != 6 )
+    opt = (vmx_basic_msr_high & (VMX_BASIC_MEMORY_TYPE_MASK >> 32)) /
+          ((VMX_BASIC_MEMORY_TYPE_MASK & -VMX_BASIC_MEMORY_TYPE_MASK) >> 32);
+    if ( opt != MTRR_TYPE_WRBACK )
     {
         printk("VMX: CPU%d has unexpected VMCS access type %u\n",
-               smp_processor_id(), (vmx_basic_msr_high >> 18) & 15);
+               smp_processor_id(), opt);
         return -EINVAL;
     }
 
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -210,8 +210,6 @@ extern u32 vmx_vmentry_control;
 #define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000
 extern u32 vmx_secondary_exec_control;
 
-extern bool_t cpu_has_vmx_ins_outs_instr_info;
-
 #define VMX_EPT_EXEC_ONLY_SUPPORTED             0x00000001
 #define VMX_EPT_WALK_LENGTH_4_SUPPORTED         0x00000040
 #define VMX_EPT_MEMORY_TYPE_UC                  0x00000100
@@ -278,6 +276,12 @@ extern bool_t cpu_has_vmx_ins_outs_instr
 #define VMX_INTR_SHADOW_SMI             0x00000004
 #define VMX_INTR_SHADOW_NMI             0x00000008
 
+#define VMX_BASIC_REVISION_MASK         0x7fffffff
+#define VMX_BASIC_VMCS_SIZE_MASK        (0x1fffULL << 32)
+#define VMX_BASIC_32BIT_ADDRESSES       (1ULL << 48)
+#define VMX_BASIC_DUAL_MONITOR          (1ULL << 49)
+#define VMX_BASIC_MEMORY_TYPE_MASK      (0xfULL << 50)
+#define VMX_BASIC_INS_OUT_INFO          (1ULL << 54)
 /* 
  * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that
  * default to 1 may be cleared to 0.
@@ -285,6 +289,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
 #define VMX_BASIC_DEFAULT1_ZERO		(1ULL << 55)
 
 extern u64 vmx_basic_msr;
+#define cpu_has_vmx_ins_outs_instr_info \
+    (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))
 
 /* Guest interrupt status */
 #define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF


* [PATCH v4 3/4] Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
  2013-09-23 10:04 [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
  2013-09-23 10:10 ` [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs Jan Beulich
  2013-09-23 10:10 ` [PATCH v4 2/4] VMX: clean up capability checks Jan Beulich
@ 2013-09-23 10:11 ` Jan Beulich
  2013-09-23 10:12 ` [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers Jan Beulich
  2013-09-30 12:05 ` Ping: [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
  4 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 10:11 UTC (permalink / raw)
  To: xen-devel; +Cc: Yang Z Zhang, Keir Fraser, Eddie Dong, Jun Nakajima

Currently, a hardcoded value is used for IA32_VMX_CR4_FIXED1. This is wrong.
We should check the guest's CPUID to know which CR4 bits the guest may
write, and allow the guest to set a bit only when it has the corresponding
feature.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>

Cleanup.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
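
As background, the FIXED0/FIXED1 MSR pair encodes per-bit constraints on
CR4 while in VMX operation: a CR4 bit must be 1 if it is set in FIXED0,
and may be 1 only if it is set in FIXED1. A minimal validity check, as a
standalone sketch (illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* A CR4 value is acceptable iff every FIXED0 bit is set in it and
     * no bit outside FIXED1 is set in it. */
    static bool cr4_valid_under_vmx(uint64_t cr4, uint64_t fixed0,
                                    uint64_t fixed1)
    {
        return (cr4 & fixed0) == fixed0 && (cr4 & ~fixed1) == 0;
    }

The patch thus computes FIXED1 (the "may be 1" set) from the guest's
CPUID instead of hardcoding it.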

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1813,7 +1813,7 @@ int nvmx_handle_invvpid(struct cpu_user_
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
-    unsigned int ecx, dummy;
+    unsigned int eax, ebx, ecx, edx, dummy;
     u64 data = 0, host_data = 0;
     int r = 1;
 
@@ -1821,7 +1821,7 @@ int nvmx_msr_read_intercept(unsigned int
         return 0;
 
     /* VMX capability MSRs are available only when guest supports VMX. */
-    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &dummy);
+    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
     if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
         return 0;
 
@@ -1945,8 +1945,55 @@ int nvmx_msr_read_intercept(unsigned int
         data = X86_CR4_VMXE;
         break;
     case MSR_IA32_VMX_CR4_FIXED1:
-        /* allow 0-settings except SMXE */
-        data = 0x267ff & ~X86_CR4_SMXE;
+        if ( edx & cpufeat_mask(X86_FEATURE_VME) )
+            data |= X86_CR4_VME | X86_CR4_PVI;
+        if ( edx & cpufeat_mask(X86_FEATURE_TSC) )
+            data |= X86_CR4_TSD;
+        if ( edx & cpufeat_mask(X86_FEATURE_DE) )
+            data |= X86_CR4_DE;
+        if ( edx & cpufeat_mask(X86_FEATURE_PSE) )
+            data |= X86_CR4_PSE;
+        if ( edx & cpufeat_mask(X86_FEATURE_PAE) )
+            data |= X86_CR4_PAE;
+        if ( edx & cpufeat_mask(X86_FEATURE_MCE) )
+            data |= X86_CR4_MCE;
+        if ( edx & cpufeat_mask(X86_FEATURE_PGE) )
+            data |= X86_CR4_PGE;
+        if ( edx & cpufeat_mask(X86_FEATURE_FXSR) )
+            data |= X86_CR4_OSFXSR;
+        if ( edx & cpufeat_mask(X86_FEATURE_XMM) )
+            data |= X86_CR4_OSXMMEXCPT;
+        if ( ecx & cpufeat_mask(X86_FEATURE_VMXE) )
+            data |= X86_CR4_VMXE;
+        if ( ecx & cpufeat_mask(X86_FEATURE_SMXE) )
+            data |= X86_CR4_SMXE;
+        if ( ecx & cpufeat_mask(X86_FEATURE_PCID) )
+            data |= X86_CR4_PCIDE;
+        if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
+            data |= X86_CR4_OSXSAVE;
+
+        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
+        switch ( eax )
+        {
+        default:
+            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
+            /* Check whether guest has the perf monitor feature. */
+            if ( (eax & 0xff) && (eax & 0xff00) )
+                data |= X86_CR4_PCE;
+            /* fall through */
+        case 0x7 ... 0x9:
+            ecx = 0;
+            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
+            if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
+                data |= X86_CR4_FSGSBASE;
+            if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )
+                data |= X86_CR4_SMEP;
+            if ( ebx & cpufeat_mask(X86_FEATURE_SMAP) )
+                data |= X86_CR4_SMAP;
+            /* fall through */
+        case 0x0 ... 0x6:
+            break;
+        }
         break;
     case MSR_IA32_VMX_MISC:
         /* Do not support CR3-target feature now */
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -148,6 +148,7 @@
 #define X86_FEATURE_INVPCID	(7*32+10) /* Invalidate Process Context ID */
 #define X86_FEATURE_RTM 	(7*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_NO_FPU_SEL 	(7*32+13) /* FPU CS/DS stored as zero */
+#define X86_FEATURE_SMAP	(7*32+20) /* Supervisor Mode Access Prevention */
 
 #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
 #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -87,6 +87,7 @@
 #define X86_CR4_PCIDE		0x20000 /* enable PCID */
 #define X86_CR4_OSXSAVE	0x40000 /* enable XSAVE/XRSTOR */
 #define X86_CR4_SMEP		0x100000/* enable SMEP */
+#define X86_CR4_SMAP		0x200000/* enable SMAP */
 
 /*
  * Trap/fault mnemonics.


* [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers
  2013-09-23 10:04 [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
                   ` (2 preceding siblings ...)
  2013-09-23 10:11 ` [PATCH v4 3/4] Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation Jan Beulich
@ 2013-09-23 10:12 ` Jan Beulich
  2013-09-23 10:34   ` Andrew Cooper
  2013-09-23 13:42   ` Boris Ostrovsky
  2013-09-30 12:05 ` Ping: [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
  4 siblings, 2 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 10:12 UTC (permalink / raw)
  To: xen-devel
  Cc: Keir Fraser, suravee.suthikulpanit, Jacob Shin, Eddie Dong,
	Jun Nakajima, Yang Z Zhang, Boris Ostrovsky

Now that we other HVM code start making more extensive use of
hvm_cpuid(), let's not force every caller to declare dummy variables
for output not cared about.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
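
The pattern itself, reduced to a generic standalone sketch (illustrative
only; the function and variable names are invented):

    #include <stdio.h>

    /* Callers pass NULL for outputs they do not care about; the callee
     * redirects such pointers at a local dummy before use. */
    static void get_pair(unsigned int *lo, unsigned int *hi)
    {
        unsigned int dummy;

        if ( !lo )
            lo = &dummy;
        if ( !hi )
            hi = &dummy;
        *lo = 1;
        *hi = 2;
    }

    int main(void)
    {
        unsigned int hi;

        get_pair(NULL, &hi);   /* only the second output is of interest */
        printf("%u\n", hi);
        return 0;
    }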

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2757,7 +2757,17 @@ void hvm_cpuid(unsigned int input, unsig
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned int count = *ecx;
+    unsigned int count, dummy = 0;
+
+    if ( !eax )
+        eax = &dummy;
+    if ( !ebx )
+        ebx = &dummy;
+    if ( !ecx )
+        ecx = &dummy;
+    count = *ecx;
+    if ( !edx )
+        edx = &dummy;
 
     if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
         return;
@@ -2765,7 +2775,7 @@ void hvm_cpuid(unsigned int input, unsig
     if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
         return;
 
-    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
+    domain_cpuid(d, input, count, eax, ebx, ecx, edx);
 
     switch ( input )
     {
@@ -2853,15 +2863,15 @@ int hvm_msr_read_intercept(unsigned int 
 {
     struct vcpu *v = current;
     uint64_t *var_range_base, *fixed_range_base;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
     fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
 
-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
     switch ( msr )
     {
@@ -2969,15 +2979,15 @@ int hvm_msr_read_intercept(unsigned int 
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
     struct vcpu *v = current;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
 
-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
     hvm_memory_event_msr(msr, msr_content);
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -806,13 +806,13 @@ static inline void svm_lwp_load(struct v
 /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
 static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
 {
-    unsigned int eax, ebx, ecx, edx;
+    unsigned int edx;
     uint32_t msr_low;
     static uint8_t lwp_intr_vector;
 
     if ( xsave_enabled(v) && cpu_has_lwp )
     {
-        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+        hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
         msr_low = (uint32_t)msr_content;
         
         /* generate #GP if guest tries to turn on unsupported features. */
@@ -1163,10 +1163,10 @@ static void svm_init_erratum_383(struct 
 
 static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
 {
-    uint eax, ebx, ecx, edx;
+    unsigned int ecx;
 
     /* Guest OSVW support */
-    hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
     if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
         return -1;
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1813,7 +1813,7 @@ int nvmx_handle_invvpid(struct cpu_user_
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
-    unsigned int eax, ebx, ecx, edx, dummy;
+    unsigned int eax, ebx, ecx, edx;
     u64 data = 0, host_data = 0;
     int r = 1;
 
@@ -1821,7 +1821,7 @@ int nvmx_msr_read_intercept(unsigned int
         return 0;
 
     /* VMX capability MSRs are available only when guest supports VMX. */
-    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
+    hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
     if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
         return 0;
 
@@ -1972,18 +1972,18 @@ int nvmx_msr_read_intercept(unsigned int
         if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
             data |= X86_CR4_OSXSAVE;
 
-        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
+        hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
         switch ( eax )
         {
         default:
-            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
+            hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
             /* Check whether guest has the perf monitor feature. */
             if ( (eax & 0xff) && (eax & 0xff00) )
                 data |= X86_CR4_PCE;
             /* fall through */
         case 0x7 ... 0x9:
             ecx = 0;
-            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
+            hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
             if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
                 data |= X86_CR4_FSGSBASE;
             if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )


* Re: [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers
  2013-09-23 10:12 ` [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers Jan Beulich
@ 2013-09-23 10:34   ` Andrew Cooper
  2013-09-23 13:42   ` Boris Ostrovsky
  1 sibling, 0 replies; 12+ messages in thread
From: Andrew Cooper @ 2013-09-23 10:34 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Keir Fraser, suravee.suthikulpanit, Jacob Shin, Eddie Dong,
	Jun Nakajima, Yang Z Zhang, xen-devel, Boris Ostrovsky

On 23/09/13 11:12, Jan Beulich wrote:
> Now that we other HVM code start making more extensive use of
> hvm_cpuid(), let's not force every caller to declare dummy variables
> for output not cared about.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

This also looks to fix Coverity's (IMHO valid) objection to
dereferencing ecx and storing it when passed a pointer to an
uninitialised variable.
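
(The flagged pattern, in schematic form -- a standalone mock, not Xen
code: the old hvm_cpuid() read *ecx as the sub-leaf count before writing
any output, so callers passing uninitialised storage fed it an
indeterminate value.)

    #include <stdint.h>
    #include <stdio.h>

    /* Mock with the old hvm_cpuid() shape: reads *ecx before writing. */
    static void mock_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                           uint32_t *ecx, uint32_t *edx)
    {
        uint32_t count = *ecx;   /* reads the caller's uninitialised value */

        *eax = leaf; *ebx = 0; *ecx = count; *edx = 0;
    }

    int main(void)
    {
        uint32_t r[4];           /* uninitialised, as in the old callers */

        mock_cpuid(1, &r[0], &r[1], &r[2], &r[3]);
        printf("%u\n", r[2]);    /* indeterminate: the Coverity finding */
        return 0;
    }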

>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2757,7 +2757,17 @@ void hvm_cpuid(unsigned int input, unsig
>  {
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
> -    unsigned int count = *ecx;
> +    unsigned int count, dummy = 0;
> +
> +    if ( !eax )
> +        eax = &dummy;
> +    if ( !ebx )
> +        ebx = &dummy;
> +    if ( !ecx )
> +        ecx = &dummy;
> +    count = *ecx;
> +    if ( !edx )
> +        edx = &dummy;
>  
>      if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
>          return;
> @@ -2765,7 +2775,7 @@ void hvm_cpuid(unsigned int input, unsig
>      if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
>          return;
>  
> -    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
> +    domain_cpuid(d, input, count, eax, ebx, ecx, edx);
>  
>      switch ( input )
>      {
> @@ -2853,15 +2863,15 @@ int hvm_msr_read_intercept(unsigned int 
>  {
>      struct vcpu *v = current;
>      uint64_t *var_range_base, *fixed_range_base;
> -    int index, mtrr;
> -    uint32_t cpuid[4];
> +    bool_t mtrr;
> +    unsigned int edx, index;
>      int ret = X86EMUL_OKAY;
>  
>      var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
>      fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
>  
> -    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
> -    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
> +    hvm_cpuid(1, NULL, NULL, NULL, &edx);
> +    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
>  
>      switch ( msr )
>      {
> @@ -2969,15 +2979,15 @@ int hvm_msr_read_intercept(unsigned int 
>  int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>  {
>      struct vcpu *v = current;
> -    int index, mtrr;
> -    uint32_t cpuid[4];
> +    bool_t mtrr;
> +    unsigned int edx, index;
>      int ret = X86EMUL_OKAY;
>  
>      HVMTRACE_3D(MSR_WRITE, msr,
>                 (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
>  
> -    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
> -    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
> +    hvm_cpuid(1, NULL, NULL, NULL, &edx);
> +    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
>  
>      hvm_memory_event_msr(msr, msr_content);
>  
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -806,13 +806,13 @@ static inline void svm_lwp_load(struct v
>  /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
>  static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
>  {
> -    unsigned int eax, ebx, ecx, edx;
> +    unsigned int edx;
>      uint32_t msr_low;
>      static uint8_t lwp_intr_vector;
>  
>      if ( xsave_enabled(v) && cpu_has_lwp )
>      {
> -        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
> +        hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
>          msr_low = (uint32_t)msr_content;
>          
>          /* generate #GP if guest tries to turn on unsupported features. */
> @@ -1163,10 +1163,10 @@ static void svm_init_erratum_383(struct 
>  
>  static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
>  {
> -    uint eax, ebx, ecx, edx;
> +    unsigned int ecx;
>  
>      /* Guest OSVW support */
> -    hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
> +    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
>      if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
>          return -1;
>  
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1813,7 +1813,7 @@ int nvmx_handle_invvpid(struct cpu_user_
>  int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
>  {
>      struct vcpu *v = current;
> -    unsigned int eax, ebx, ecx, edx, dummy;
> +    unsigned int eax, ebx, ecx, edx;
>      u64 data = 0, host_data = 0;
>      int r = 1;
>  
> @@ -1821,7 +1821,7 @@ int nvmx_msr_read_intercept(unsigned int
>          return 0;
>  
>      /* VMX capability MSRs are available only when guest supports VMX. */
> -    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
> +    hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
>      if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
>          return 0;
>  
> @@ -1972,18 +1972,18 @@ int nvmx_msr_read_intercept(unsigned int
>          if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
>              data |= X86_CR4_OSXSAVE;
>  
> -        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
> +        hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
>          switch ( eax )
>          {
>          default:
> -            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
> +            hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
>              /* Check whether guest has the perf monitor feature. */
>              if ( (eax & 0xff) && (eax & 0xff00) )
>                  data |= X86_CR4_PCE;
>              /* fall through */
>          case 0x7 ... 0x9:
>              ecx = 0;
> -            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
> +            hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
>              if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
>                  data |= X86_CR4_FSGSBASE;
>              if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )
>


* Re: [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers
  2013-09-23 10:12 ` [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers Jan Beulich
  2013-09-23 10:34   ` Andrew Cooper
@ 2013-09-23 13:42   ` Boris Ostrovsky
  2013-09-23 13:45     ` Jan Beulich
  1 sibling, 1 reply; 12+ messages in thread
From: Boris Ostrovsky @ 2013-09-23 13:42 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Keir Fraser, suravee.suthikulpanit, Eddie Dong, Jacob Shin,
	Jun Nakajima, Yang Z Zhang, xen-devel

On 09/23/2013 06:12 AM, Jan Beulich wrote:
> Now that we other HVM code start making more extensive use of

Something is wrong with this sentence. I think "we" is not needed.

Other than that

Acked-by:  Boris Ostrovsky <boris.ostrovsky@oracle.com>


-boris

> hvm_cpuid(), let's not force every caller to declare dummy variables
> for output not cared about.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2757,7 +2757,17 @@ void hvm_cpuid(unsigned int input, unsig
>   {
>       struct vcpu *v = current;
>       struct domain *d = v->domain;
> -    unsigned int count = *ecx;
> +    unsigned int count, dummy = 0;
> +
> +    if ( !eax )
> +        eax = &dummy;
> +    if ( !ebx )
> +        ebx = &dummy;
> +    if ( !ecx )
> +        ecx = &dummy;
> +    count = *ecx;
> +    if ( !edx )
> +        edx = &dummy;
>   
>       if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
>           return;
> @@ -2765,7 +2775,7 @@ void hvm_cpuid(unsigned int input, unsig
>       if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
>           return;
>   
> -    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
> +    domain_cpuid(d, input, count, eax, ebx, ecx, edx);
>   
>       switch ( input )
>       {
> @@ -2853,15 +2863,15 @@ int hvm_msr_read_intercept(unsigned int
>   {
>       struct vcpu *v = current;
>       uint64_t *var_range_base, *fixed_range_base;
> -    int index, mtrr;
> -    uint32_t cpuid[4];
> +    bool_t mtrr;
> +    unsigned int edx, index;
>       int ret = X86EMUL_OKAY;
>   
>       var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
>       fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
>   
> -    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
> -    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
> +    hvm_cpuid(1, NULL, NULL, NULL, &edx);
> +    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
>   
>       switch ( msr )
>       {
> @@ -2969,15 +2979,15 @@ int hvm_msr_read_intercept(unsigned int
>   int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>   {
>       struct vcpu *v = current;
> -    int index, mtrr;
> -    uint32_t cpuid[4];
> +    bool_t mtrr;
> +    unsigned int edx, index;
>       int ret = X86EMUL_OKAY;
>   
>       HVMTRACE_3D(MSR_WRITE, msr,
>                  (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
>   
> -    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
> -    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
> +    hvm_cpuid(1, NULL, NULL, NULL, &edx);
> +    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
>   
>       hvm_memory_event_msr(msr, msr_content);
>   
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -806,13 +806,13 @@ static inline void svm_lwp_load(struct v
>   /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
>   static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
>   {
> -    unsigned int eax, ebx, ecx, edx;
> +    unsigned int edx;
>       uint32_t msr_low;
>       static uint8_t lwp_intr_vector;
>   
>       if ( xsave_enabled(v) && cpu_has_lwp )
>       {
> -        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
> +        hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
>           msr_low = (uint32_t)msr_content;
>           
>           /* generate #GP if guest tries to turn on unsupported features. */
> @@ -1163,10 +1163,10 @@ static void svm_init_erratum_383(struct
>   
>   static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
>   {
> -    uint eax, ebx, ecx, edx;
> +    unsigned int ecx;
>   
>       /* Guest OSVW support */
> -    hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
> +    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
>       if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
>           return -1;
>   
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1813,7 +1813,7 @@ int nvmx_handle_invvpid(struct cpu_user_
>   int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
>   {
>       struct vcpu *v = current;
> -    unsigned int eax, ebx, ecx, edx, dummy;
> +    unsigned int eax, ebx, ecx, edx;
>       u64 data = 0, host_data = 0;
>       int r = 1;
>   
> @@ -1821,7 +1821,7 @@ int nvmx_msr_read_intercept(unsigned int
>           return 0;
>   
>       /* VMX capability MSRs are available only when guest supports VMX. */
> -    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
> +    hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
>       if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
>           return 0;
>   
> @@ -1972,18 +1972,18 @@ int nvmx_msr_read_intercept(unsigned int
>           if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
>               data |= X86_CR4_OSXSAVE;
>   
> -        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
> +        hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
>           switch ( eax )
>           {
>           default:
> -            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
> +            hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
>               /* Check whether guest has the perf monitor feature. */
>               if ( (eax & 0xff) && (eax & 0xff00) )
>                   data |= X86_CR4_PCE;
>               /* fall through */
>           case 0x7 ... 0x9:
>               ecx = 0;
> -            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
> +            hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
>               if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
>                   data |= X86_CR4_FSGSBASE;
>               if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )
>
>


* Re: [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers
  2013-09-23 13:42   ` Boris Ostrovsky
@ 2013-09-23 13:45     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 13:45 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Keir Fraser, suravee.suthikulpanit, Jacob Shin, Eddie Dong,
	Jun Nakajima, Yang Z Zhang, xen-devel

>>> On 23.09.13 at 15:42, Boris Ostrovsky <boris.ostrovsky@oracle.com> wrote:
> On 09/23/2013 06:12 AM, Jan Beulich wrote:
>> Now that we other HVM code start making more extensive use of
> 
> Something is wrong with this sentence. I think "we" is not needed.

Oh, yes. Dropped.

> Other than that
> 
> Acked-by:  Boris Ostrovsky <boris.ostrovsky@oracle.com>

Thanks!

Jan


* Re: [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs
  2013-09-23 10:10 ` [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs Jan Beulich
@ 2013-09-23 14:50   ` Boris Ostrovsky
  2013-09-23 14:52     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Boris Ostrovsky @ 2013-09-23 14:50 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Yang Z Zhang, xen-devel, Eddie Dong, Jun Nakajima

On 09/23/2013 06:10 AM, Jan Beulich wrote:
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1813,12 +1813,33 @@ int nvmx_handle_invvpid(struct cpu_user_
>   int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
>   {
>       struct vcpu *v = current;
> +    unsigned int ecx, dummy;
>       u64 data = 0, host_data = 0;
>       int r = 1;
>   
>       if ( !nestedhvm_enabled(v->domain) )
>           return 0;
>   
> +    /* VMX capability MSRs are available only when guest supports VMX. */
> +    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &dummy);

Depending on the order in which you are going to commit these patches, you
could change the hvm_cpuid() dummy arguments to NULLs to make use of your
"make hvm_cpuid() ..." patch. In [3/4] as well.


-boris


* Re: [PATCH v4 1/4] Nested VMX: check VMX capability before read VMX related MSRs
  2013-09-23 14:50   ` Boris Ostrovsky
@ 2013-09-23 14:52     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2013-09-23 14:52 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: Yang Z Zhang, xen-devel, Eddie Dong, Jun Nakajima

>>> On 23.09.13 at 16:50, Boris Ostrovsky <boris.ostrovsky@oracle.com> wrote:
> On 09/23/2013 06:10 AM, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/vmx/vvmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
>> @@ -1813,12 +1813,33 @@ int nvmx_handle_invvpid(struct cpu_user_
>>   int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
>>   {
>>       struct vcpu *v = current;
>> +    unsigned int ecx, dummy;
>>       u64 data = 0, host_data = 0;
>>       int r = 1;
>>   
>>       if ( !nestedhvm_enabled(v->domain) )
>>           return 0;
>>   
>> +    /* VMX capability MSRs are available only when guest supports VMX. */
>> +    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &dummy);
> 
> Depending on order in which you are going to commit patches you could 
> change hvm_cpuid() dummy arguments to NULLs to make use of your "make 
> hvm_cpuid() ..." patch. In [3/4] as well.

That last patch in the series does just that.

Jan


* Ping: [PATCH v4 0/4] x86/HVM: miscellaneous improvements
  2013-09-23 10:04 [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
                   ` (3 preceding siblings ...)
  2013-09-23 10:12 ` [PATCH v4 4/4] x86: make hvm_cpuid() tolerate NULL pointers Jan Beulich
@ 2013-09-30 12:05 ` Jan Beulich
  2013-10-02 15:48   ` Nakajima, Jun
  4 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2013-09-30 12:05 UTC (permalink / raw)
  To: Eddie Dong, Jun Nakajima; +Cc: Yang Z Zhang, xen-devel

>>> On 23.09.13 at 12:04, "Jan Beulich" <JBeulich@suse.com> wrote:
> The first and third patches are cleaned up versions of an earlier v3
> submission by Yang.
> 
> 1: Nested VMX: check VMX capability before read VMX related MSRs
> 2: VMX: clean up capability checks
> 3: Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
> 4: x86: make hvm_cpuid() tolerate NULL pointers
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 


* Re: Ping: [PATCH v4 0/4] x86/HVM: miscellaneous improvements
  2013-09-30 12:05 ` Ping: [PATCH v4 0/4] x86/HVM: miscellaneous improvements Jan Beulich
@ 2013-10-02 15:48   ` Nakajima, Jun
  0 siblings, 0 replies; 12+ messages in thread
From: Nakajima, Jun @ 2013-10-02 15:48 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Yang Z Zhang, xen-devel, Eddie Dong

On Mon, Sep 30, 2013 at 5:05 AM, Jan Beulich <JBeulich@suse.com> wrote:

> >>> On 23.09.13 at 12:04, "Jan Beulich" <JBeulich@suse.com> wrote:
> > The first and third patches are cleaned up versions of an earlier v3
> > submission by Yang.
> >
> > 1: Nested VMX: check VMX capability before read VMX related MSRs
> > 2: VMX: clean up capability checks
> > 3: Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
> > 4: x86: make hvm_cpuid() tolerate NULL pointers
> >
> > Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >
>

Acked-by: Jun Nakajima <jun.nakajima@intel.com>

-- 
Jun
Intel Open Source Technology Center

