* [PATCH 1/2] KVM: x86: update cpuid according to IA32_MISC_ENABLE
2014-08-20 13:58 [PATCH 0/2] KVM: vmx: Supporting IA32_MISC_ENABLE MSR Nadav Amit
@ 2014-08-20 13:58 ` Nadav Amit
2014-08-20 13:58 ` [PATCH 2/2] KVM: vmx: Reflect misc_enables in real CPU Nadav Amit
2014-08-20 14:00 ` [PATCH] x86: Test debug exceptions with disabled fast-string Nadav Amit
2 siblings, 0 replies; 6+ messages in thread
From: Nadav Amit @ 2014-08-20 13:58 UTC (permalink / raw)
To: pbonzini; +Cc: joro, kvm, nadav.amit, Nadav Amit
Virtual BIOS may use the "Limit CPUID Maxval" and "XD Bit Disable" fields in
IA32_MISC_ENABLE. These two fields affect the values reported by CPUID, and
"XD Bit Disable" additionally disables NX support.
This patch reflects this behavior in CPUID, and disables NX bit accordingly.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
---
arch/x86/kvm/cpuid.c | 20 ++++++++++++++++++++
arch/x86/kvm/vmx.c | 8 ++++++--
2 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 38a0afe..ff7f429 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -757,6 +757,25 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
+static void cpuid_override(struct kvm_vcpu *vcpu, u32 function, u32 index,
+ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+ switch (function) {
+ case 0:
+ if (vcpu->arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
+ *eax = min_t(u32, *eax, 3);
+ break;
+ case 1:
+ if (vcpu->arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_XD_DISABLE)
+ *edx &= ~bit(X86_FEATURE_NX);
+ break;
+ default:
+ break;
+ }
+}
+
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
u32 function = *eax, index = *ecx;
@@ -774,6 +793,7 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
*edx = best->edx;
} else
*eax = *ebx = *ecx = *edx = 0;
+ cpuid_override(vcpu, function, index, eax, ebx, ecx, edx);
trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cad37d5..45bab55 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1633,9 +1633,13 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
+ /* Clear NX if XD Bit Disable is set */
+ guest_efer = vmx->vcpu.arch.efer;
+ if (vmx->vcpu.arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_XD_DISABLE)
+ guest_efer &= ~EFER_NX;
/* On ept, can't emulate nx, and must switch nx atomically */
- if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
- guest_efer = vmx->vcpu.arch.efer;
+ if (enable_ept && ((guest_efer ^ host_efer) & EFER_NX)) {
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
--
1.9.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 2/2] KVM: vmx: Reflect misc_enables in real CPU
2014-08-20 13:58 [PATCH 0/2] KVM: vmx: Supporting IA32_MISC_ENABLE MSR Nadav Amit
2014-08-20 13:58 ` [PATCH 1/2] KVM: x86: update cpuid according to IA32_MISC_ENABLE Nadav Amit
@ 2014-08-20 13:58 ` Nadav Amit
2014-08-21 11:51 ` Paolo Bonzini
2014-08-20 14:00 ` [PATCH] x86: Test debug exceptions with disabled fast-string Nadav Amit
2 siblings, 1 reply; 6+ messages in thread
From: Nadav Amit @ 2014-08-20 13:58 UTC (permalink / raw)
To: pbonzini; +Cc: joro, kvm, nadav.amit, Nadav Amit
IA32_MISC_ENABLE MSR has two bits that affect the actual results which can be
observed by the guest: fast string enable, and FOPCODE compatibility. Guests
may wish to change the default settings of these bits.
Linux usually enables fast-string by default. However, when "fast string" is
enabled, data breakpoints are only recognized on boundaries between data-groups.
On some old CPUs enabling fast-string also resulted in single-step not
occurring upon each iteration.
FOPCODE compatibility can be used to analyze program performance by recording
the last instruction executed before FSAVE/FSTENV/FXSAVE.
This patch saves and restores these bits in IA32_MISC_ENABLE if they are
supported upon entry to guest and exit to userspace respectively. To avoid
possible issues, fast-string can only be enabled by the guest if the host
enabled it. The physical CPU version is checked to ensure no shared bits are
reconfigured in the process.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm.c | 7 ++++++
arch/x86/kvm/vmx.c | 56 +++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/x86.c | 2 +-
4 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4bda61b..879b930 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -699,6 +699,7 @@ struct kvm_x86_ops {
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+ void (*set_misc_enable)(struct kvm_vcpu *vcpu, u64 data);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1f49c86..378e50e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -480,6 +480,11 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}
+static void svm_set_misc_enable(struct kvm_vcpu *vcpu, u64 data)
+{
+ vcpu->arch.ia32_misc_enable_msr = data;
+}
+
static int is_external_interrupt(u32 info)
{
info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -1152,6 +1157,7 @@ static void init_vmcb(struct vcpu_svm *svm)
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
svm_set_efer(&svm->vcpu, 0);
+ svm_set_misc_enable(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
kvm_set_rflags(&svm->vcpu, 2);
save->rip = 0x0000fff0;
@@ -4338,6 +4344,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_cr3 = svm_set_cr3,
.set_cr4 = svm_set_cr4,
.set_efer = svm_set_efer,
+ .set_misc_enable = svm_set_misc_enable,
.get_idt = svm_get_idt,
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 45bab55..2d2efd0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -809,6 +809,8 @@ static const struct kvm_vmx_segment_field {
};
static u64 host_efer;
+static u64 host_misc_enable;
+static u64 guest_misc_enable_mask;
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
@@ -1609,6 +1611,33 @@ static void reload_tss(void)
load_TR_desc();
}
+static void __init update_guest_misc_enable_mask(void)
+{
+ /* Calculate which of the IA32_MISC_ENABLE bits should be reflected
+ in hardware */
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u64 data;
+
+ guest_misc_enable_mask = 0;
+
+ /* Core/Atom architecture share fast-string and x86 compat */
+ if (c->x86 != 6 || c->x86_model < 0xd)
+ return;
+
+ if (rdmsrl_safe(MSR_IA32_MISC_ENABLE, &data) < 0)
+ return;
+ if (boot_cpu_has(X86_FEATURE_REP_GOOD))
+ guest_misc_enable_mask |= MSR_IA32_MISC_ENABLE_FAST_STRING;
+
+ preempt_disable();
+ if (wrmsrl_safe(MSR_IA32_MISC_ENABLE,
+ data | MSR_IA32_MISC_ENABLE_X87_COMPAT) >= 0) {
+ guest_misc_enable_mask |= MSR_IA32_MISC_ENABLE_X87_COMPAT;
+ wrmsrl(MSR_IA32_MISC_ENABLE, data);
+ }
+ preempt_enable();
+}
+
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
u64 guest_efer;
@@ -3126,6 +3155,8 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
+ update_guest_misc_enable_mask();
+
if (enable_apicv)
kvm_x86_ops->update_cr8_intercept = NULL;
else {
@@ -3315,6 +3346,28 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
setup_msrs(vmx);
}
+static void vmx_set_misc_enable(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ vcpu->arch.ia32_misc_enable_msr = data;
+
+ if (guest_misc_enable_mask == 0)
+ return;
+ clear_atomic_switch_msr(vmx, MSR_IA32_MISC_ENABLE);
+ if (((data ^ host_misc_enable) & guest_misc_enable_mask) == 0)
+ return;
+
+ /*
+ * If the guest has different value, we want to load it atomically
+ * since it can affect host performance
+ */
+ data &= guest_misc_enable_mask;
+ data |= (host_misc_enable & ~guest_misc_enable_mask);
+ add_atomic_switch_msr(vmx, MSR_IA32_MISC_ENABLE, data,
+ host_misc_enable);
+}
+
#ifdef CONFIG_X86_64
static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -4555,6 +4608,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
vmx_set_cr4(&vmx->vcpu, 0);
vmx_set_efer(&vmx->vcpu, 0);
+ vmx_set_misc_enable(&vmx->vcpu, 0);
vmx_fpu_activate(&vmx->vcpu);
update_exception_bitmap(&vmx->vcpu);
@@ -8883,6 +8937,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_cr3 = vmx_set_cr3,
.set_cr4 = vmx_set_cr4,
.set_efer = vmx_set_efer,
+ .set_misc_enable = vmx_set_misc_enable,
.get_idt = vmx_get_idt,
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
@@ -8961,6 +9016,7 @@ static int __init vmx_init(void)
int r, i, msr;
rdmsrl_safe(MSR_EFER, &host_efer);
+ rdmsrl_safe(MSR_IA32_MISC_ENABLE, &host_misc_enable);
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
kvm_define_shared_msr(i, vmx_msr_index[i]);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5f5edb6..72d449a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2096,7 +2096,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
case MSR_IA32_MISC_ENABLE:
- vcpu->arch.ia32_misc_enable_msr = data;
+ kvm_x86_ops->set_misc_enable(vcpu, data);
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
--
1.9.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH] x86: Test debug exceptions with disabled fast-string
2014-08-20 13:58 [PATCH 0/2] KVM: vmx: Supporting IA32_MISC_ENABLE MSR Nadav Amit
2014-08-20 13:58 ` [PATCH 1/2] KVM: x86: update cpuid according to IA32_MISC_ENABLE Nadav Amit
2014-08-20 13:58 ` [PATCH 2/2] KVM: vmx: Reflect misc_enables in real CPU Nadav Amit
@ 2014-08-20 14:00 ` Nadav Amit
2 siblings, 0 replies; 6+ messages in thread
From: Nadav Amit @ 2014-08-20 14:00 UTC (permalink / raw)
To: pbonzini; +Cc: joro, kvm, nadav.amit, Nadav Amit
x86 allows enabling "fast strings", sacrificing the precision of debug
watchpoints. Previously, KVM did not reflect the guest "fast strings" settings
in the actual MSR, always resulting in imprecise exceptions.
This test checks whether disabling "fast strings" causes the debug trap on a
rep-string operation to occur on the precise iteration. A debug watchpoint which is not
cache-line aligned is set, and 128 bytes are set using rep-string operation.
The iteration in which the debug exception occurred is then checked.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
---
x86/debug.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/x86/debug.c b/x86/debug.c
index 34e56fb..eb96dbe 100644
--- a/x86/debug.c
+++ b/x86/debug.c
@@ -11,10 +11,13 @@
#include "libcflat.h"
#include "desc.h"
+#include "msr.h"
+#include "processor.h"
-static volatile unsigned long bp_addr[10], dr6[10];
+static volatile unsigned long bp_addr[10], dr6[10], rcx[10];
static volatile unsigned int n;
static volatile unsigned long value;
+static unsigned char dst[128] __attribute__ ((aligned(64)));
static unsigned long get_dr6(void)
{
@@ -43,6 +46,7 @@ static void handle_db(struct ex_regs *regs)
{
bp_addr[n] = regs->rip;
dr6[n] = get_dr6();
+ rcx[n] = regs->rcx;
if (dr6[n] & 0x1)
regs->rflags |= (1 << 16);
@@ -60,7 +64,7 @@ static void handle_bp(struct ex_regs *regs)
int main(int ac, char **av)
{
- unsigned long start;
+ unsigned long start, misc_enable;
setup_idt();
handle_exception(DB_VECTOR, handle_db);
@@ -109,5 +113,18 @@ hw_wp:
n == 1 &&
bp_addr[0] == ((unsigned long)&&hw_wp) && dr6[0] == 0xffff4ff2);
+ misc_enable = rdmsr(MSR_IA32_MISC_ENABLE);
+ wrmsr(MSR_IA32_MISC_ENABLE,
+ misc_enable & ~MSR_IA32_MISC_ENABLE_FAST_STRING);
+
+ n = 0;
+ set_dr1((void *)&dst[59]);
+ set_dr7(0x0010040a);
+
+ asm volatile("rep stosb\n\t" : : "D"(dst), "c"(128) : "cc", "memory");
+
+ report("hw watchpoint with disabled fast-string", rcx[0] == 128-1-59);
+ wrmsr(MSR_IA32_MISC_ENABLE, misc_enable);
+
return report_summary();
}
--
1.9.1
^ permalink raw reply related [flat|nested] 6+ messages in thread