* [PATCH v5] x86/HVM: make hvm_efer_valid() honor guest features
@ 2015-01-22 13:56 Jan Beulich
  2015-01-23 14:03 ` Andrew Cooper
From: Jan Beulich @ 2015-01-22 13:56 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

Following the earlier similar change validating CR4 modifications.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v5: relax SCE check
v4: Drop hvm_cpuid() adjustment and use hvm_funcs.cpuid_intercept()
    instead for leaf 0x80000001.
v3: Drop the cr0_pg > 0 test from the LMA/LME check: it would need to be
    >= 0, which is then redundant with the EFER_LMA check (EFER_LMA gets
    cleared by the caller when cr0_pg is passed a negative value). Force
    the SYSCALL feature flag on when the guest is in 64-bit mode.
v2: consider CR0.PG during restore when checking EFER.LMA
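
For readers of the diff below, a minimal sketch (not part of the patch; it
merely restates the two call sites shown in the hunks) of the new cr0_pg
calling convention:

    /*
     * Illustrative sketch only: the cr0_pg argument of hvm_efer_valid()
     * distinguishes three cases.
     *   cr0_pg < 0   runtime MSR write by "current": guest CPUID (host
     *                features for the hardware domain) is consulted, and
     *                the caller has already masked EFER_LMA off.
     *   cr0_pg == 0  restore with CR0.PG clear: EFER_LMA must not be set.
     *   cr0_pg == 1  restore with CR0.PG set: EFER_LMA additionally
     *                requires EFER_LME.
     */

    /* Restore path (hvm_load_cpu_ctxt()): pass CR0.PG as 0 or 1. */
    if ( !hvm_efer_valid(v, ctxt.msr_efer, MASK_EXTR(ctxt.cr0, X86_CR0_PG)) )
        return -EINVAL;   /* error handling as in the surrounding restore code */

    /* Runtime path (hvm_set_efer()): EFER.LMA is never directly writable. */
    value &= ~EFER_LMA;
    if ( !hvm_efer_valid(v, value, -1) )
        /* reject the guest's WRMSR */;

The ASSERT(v == current) in the cr0_pg < 0 path relies on hvm_set_efer()
being the only caller passing a negative value.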

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1672,20 +1672,64 @@ static int hvm_save_cpu_ctxt(struct doma
     return 0;
 }
 
-static bool_t hvm_efer_valid(struct domain *d,
-                             uint64_t value, uint64_t efer_validbits)
+static bool_t hvm_efer_valid(const struct vcpu *v, uint64_t value,
+                             signed int cr0_pg)
 {
-    if ( nestedhvm_enabled(d) && cpu_has_svm )
-        efer_validbits |= EFER_SVME;
+    unsigned int ext1_ecx = 0, ext1_edx = 0;
 
-    return !((value & ~efer_validbits) ||
-             ((sizeof(long) != 8) && (value & EFER_LME)) ||
-             (!cpu_has_svm && (value & EFER_SVME)) ||
-             (!cpu_has_nx && (value & EFER_NX)) ||
-             (!cpu_has_syscall && (value & EFER_SCE)) ||
-             (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
-             (!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
-             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
+    if ( cr0_pg < 0 && !is_hardware_domain(v->domain) )
+    {
+        unsigned int level;
+
+        ASSERT(v == current);
+        hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
+        if ( level >= 0x80000001 )
+        {
+            unsigned int dummy;
+
+            level = 0x80000001;
+            hvm_funcs.cpuid_intercept(&level, &dummy, &ext1_ecx, &ext1_edx);
+        }
+    }
+    else
+    {
+        ext1_edx = boot_cpu_data.x86_capability[X86_FEATURE_LM / 32];
+        ext1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_SVM / 32];
+    }
+
+    /*
+     * Guests may want to set EFER.SCE and EFER.LME at the same time, so we
+     * can't make the check depend on only X86_FEATURE_SYSCALL (which on VMX
+     * will be clear without the guest having entered 64-bit mode).
+     */
+    if ( (value & EFER_SCE) &&
+         !(ext1_edx & cpufeat_mask(X86_FEATURE_SYSCALL)) &&
+         (cr0_pg >= 0 || !(value & EFER_LME)) )
+        return 0;
+
+    if ( (value & (EFER_LME | EFER_LMA)) &&
+         !(ext1_edx & cpufeat_mask(X86_FEATURE_LM)) )
+        return 0;
+
+    if ( (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
+        return 0;
+
+    if ( (value & EFER_NX) && !(ext1_edx & cpufeat_mask(X86_FEATURE_NX)) )
+        return 0;
+
+    if ( (value & EFER_SVME) &&
+         (!(ext1_ecx & cpufeat_mask(X86_FEATURE_SVM)) ||
+          !nestedhvm_enabled(v->domain)) )
+        return 0;
+
+    if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
+        return 0;
+
+    if ( (value & EFER_FFXSE) &&
+         !(ext1_edx & cpufeat_mask(X86_FEATURE_FFXSR)) )
+        return 0;
+
+    return 1;
 }
 
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
@@ -1763,7 +1807,6 @@ static int hvm_load_cpu_ctxt(struct doma
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
     struct segment_register seg;
-    uint64_t efer_validbits;
 
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
@@ -1794,9 +1837,7 @@ static int hvm_load_cpu_ctxt(struct doma
         return -EINVAL;
     }
 
-    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA
-                   | EFER_NX | EFER_SCE;
-    if ( !hvm_efer_valid(d, ctxt.msr_efer, efer_validbits) )
+    if ( !hvm_efer_valid(v, ctxt.msr_efer, MASK_EXTR(ctxt.cr0, X86_CR0_PG)) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad EFER %#" PRIx64 "\n",
                d->domain_id, ctxt.msr_efer);
@@ -2936,12 +2977,10 @@ err:
 int hvm_set_efer(uint64_t value)
 {
     struct vcpu *v = current;
-    uint64_t efer_validbits;
 
     value &= ~EFER_LMA;
 
-    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE;
-    if ( !hvm_efer_valid(v->domain, value, efer_validbits) )
+    if ( !hvm_efer_valid(v, value, -1) )
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: %#"PRIx64"\n", value);





* Re: [PATCH v5] x86/HVM: make hvm_efer_valid() honor guest features
  2015-01-22 13:56 [PATCH v5] x86/HVM: make hvm_efer_valid() honor guest features Jan Beulich
@ 2015-01-23 14:03 ` Andrew Cooper
From: Andrew Cooper @ 2015-01-23 14:03 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser

On 22/01/15 13:56, Jan Beulich wrote:
> Following the earlier similar change validating CR4 modifications.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-and-tested-by: Andrew Cooper <andrew.cooper3@citrix.com>

There were some test failures, but they can't plausibly be related to
this change.  I will investigate those separately.

