[PATCH 1/3] x86emul: correct EFLAGS.TF handling
From: Jan Beulich @ 2016-12-12 9:59 UTC
To: xen-devel; +Cc: Andrew Cooper
For repeated string instructions we should not emulate multiple
iterations in one go when a single-step trap needs injecting (which
needs to happen after every iteration).

For all non-branch instructions, as well as for not-taken conditional
branches, we additionally need to take DebugCtl.BTF into consideration.
And for mov-to/pop-into %ss there should be no #DB at all (EFLAGS.TF
remaining set means there'll be a #DB after the next instruction
instead).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
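
For reference, the #DB retire decision described above can be condensed
into the following stand-alone sketch (illustrative C only, not the
literal patched code; all predicate names are made up for clarity):

    #include <stdbool.h>

    /* Should a single-step #DB be raised once this instruction retires? */
    static bool want_singlestep_trap(bool tf_at_insn_start, bool btf,
                                     bool taken_branch, bool wrote_to_ss)
    {
        if ( !tf_at_insn_start )
            return false;    /* EFLAGS.TF clear: nothing to do. */
        if ( wrote_to_ss )
            return false;    /* mov/pop %ss: TF stays set, #DB deferred
                              * until after the next instruction. */
        if ( btf && !taken_branch )
            return false;    /* DebugCtl.BTF: trap on taken branches only. */
        return true;         /* One #DB per iteration for string insns. */
    }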
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -415,6 +415,8 @@ typedef union {
#define MSR_SYSENTER_CS 0x00000174
#define MSR_SYSENTER_ESP 0x00000175
#define MSR_SYSENTER_EIP 0x00000176
+#define MSR_DEBUGCTL 0x000001d9
+#define DEBUGCTL_BTF (1 << 1)
#define MSR_EFER 0xc0000080
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
@@ -751,6 +753,8 @@ do {
rc = ops->insn_fetch(x86_seg_cs, ip, NULL, 0, ctxt); \
if ( rc ) goto done; \
_regs.eip = ip; \
+ if ( _regs.eflags & EFLG_TF ) \
+ ctxt->retire.singlestep = true; \
} while (0)
#define validate_far_branch(cs, ip) ({ \
@@ -767,6 +771,8 @@ do {
#define commit_far_branch(cs, ip) ({ \
validate_far_branch(cs, ip); \
_regs.eip = (ip); \
+ if ( _regs.eflags & EFLG_TF ) \
+ ctxt->retire.singlestep = true; \
ops->write_segment(x86_seg_cs, cs, ctxt); \
})
@@ -948,6 +954,9 @@ static inline void put_loop_count(
} \
goto no_writeback; \
} \
+ if ( max_reps > 1 && (_regs.eflags & EFLG_TF) && \
+ !is_branch_step(ctxt, ops) ) \
+ max_reps = 1; \
max_reps; \
})
@@ -1637,6 +1646,16 @@ static bool is_aligned(enum x86_segment
return !((reg.base + offs) & (size - 1));
}
+static bool is_branch_step(struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+{
+ uint64_t debugctl;
+
+ return ops->read_msr &&
+ ops->read_msr(MSR_DEBUGCTL, &debugctl, ctxt) == X86EMUL_OKAY &&
+ (debugctl & DEBUGCTL_BTF);
+}
+
static bool umip_active(struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
@@ -3132,6 +3151,8 @@ x86_emulate(
goto done;
_regs.eip = imm1;
+ if ( _regs.eflags & EFLG_TF )
+ ctxt->retire.singlestep = true;
break;
case 0x9b: /* wait/fwait */
@@ -4608,6 +4629,8 @@ x86_emulate(
(rc = ops->write_segment(x86_seg_ss, &sreg, ctxt)) )
goto done;
+ if ( ctxt->regs->eflags & EFLG_TF )
+ ctxt->retire.singlestep = true;
break;
}
@@ -4875,6 +4898,8 @@ x86_emulate(
goto done;
_regs.esp = lm ? msr_content : (uint32_t)msr_content;
+ if ( _regs.eflags & EFLG_TF )
+ ctxt->retire.singlestep = true;
break;
}
@@ -4914,6 +4939,9 @@ x86_emulate(
_regs.eip = user64 ? _regs.edx : (uint32_t)_regs.edx;
_regs.esp = user64 ? _regs.ecx : (uint32_t)_regs.ecx;
+
+ if ( _regs.eflags & EFLG_TF )
+ ctxt->retire.singlestep = true;
break;
}
@@ -5400,7 +5428,9 @@ x86_emulate(
break;
#endif
default:
- goto cannot_emulate;
+ cannot_emulate:
+ rc = X86EMUL_UNHANDLEABLE;
+ goto done;
}
switch ( dst.type )
@@ -5445,7 +5475,8 @@ x86_emulate(
_regs.eip = (uint32_t)_regs.eip;
/* Was singlestepping active at the start of this instruction? */
- if ( (rc == X86EMUL_OKAY) && (ctxt->regs->eflags & EFLG_TF) )
+ if ( (rc == X86EMUL_OKAY) && (ctxt->regs->eflags & EFLG_TF) &&
+ !is_branch_step(ctxt, ops) && !ctxt->retire.mov_ss )
ctxt->retire.singlestep = true;
*ctxt->regs = _regs;
@@ -5461,12 +5492,17 @@ x86_emulate(
done:
_put_fpu();
put_stub(stub);
- return rc;
- cannot_emulate:
- _put_fpu();
- put_stub(stub);
- return X86EMUL_UNHANDLEABLE;
+ /*
+ * We may have set the single step flag ahead of the last possible point
+ * of failure (unavoidably with the current near CALL code flow, but also
+ * used on some far branch paths to keep the code simple), so to satisfy
+ * x86_emulate_wrapper()'s ASSERT() we may need to clear it here again.
+ */
+ if ( rc != X86EMUL_OKAY )
+ ctxt->retire.singlestep = false;
+
+ return rc;
#undef state
}
[PATCH 2/3 v2] x86emul: conditionally clear BNDn for branches
From: Jan Beulich @ 2016-12-12 10:00 UTC
To: xen-devel; +Cc: Andrew Cooper
Considering that we surface MPX to HVM guests, instructions we emulate
should also correctly deal with MPX state. While BND* instructions
don't get emulated for now, the effect which the branches we do emulate
have on BNDn when lacking a BND prefix should be taken care of.

No need to alter XABORT behavior: While not mentioned in the SDM so
far, it restores BNDn to what they were at the XBEGIN, and since we
make XBEGIN abort right away, XABORT in the emulator is effectively a
no-op.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Re-base. Address all RFC reasons based on feedback from Intel.
Re-work the actual clearing of BNDn.
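
To illustrate when clear_bnd() below actually zaps the bound registers,
here is a stand-alone sketch of the condition (assumed helper name; the
real code additionally has to fetch BNDCFGS via ops->read_msr in ring 0
and BNDCFGU via XSAVE otherwise):

    #include <stdbool.h>
    #include <stdint.h>

    #define BNDCFG_ENABLE   (1u << 0)  /* EN: MPX enabled at this CPL */
    #define BNDCFG_PRESERVE (1u << 1)  /* BNDPRESERVE: branches keep BNDn */

    /* A legacy branch (one without the F2/BND prefix) initializes
     * BND0-BND3 when MPX is enabled for the current privilege level
     * and BNDPRESERVE is clear. */
    static bool branch_clears_bnd(uint64_t bndcfg, bool bnd_prefix)
    {
        return !bnd_prefix &&
               (bndcfg & BNDCFG_ENABLE) &&
               !(bndcfg & BNDCFG_PRESERVE);
    }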
--- a/tools/tests/x86_emulator/x86_emulate.c
+++ b/tools/tests/x86_emulator/x86_emulate.c
@@ -5,6 +5,8 @@
#define cpu_has_amd_erratum(nr) 0
#define mark_regs_dirty(r) ((void)(r))
+#define read_bndcfgu() 0
+#define xstate_set_init(what)
/* For generic assembly code: use macros to define operation/operand sizes. */
#ifdef __i386__
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -421,6 +421,8 @@ int vcpu_initialise(struct vcpu *v)
vmce_init_vcpu(v);
}
+ else if ( (rc = xstate_alloc_save_area(v)) != 0 )
+ return rc;
spin_lock_init(&v->arch.vpmu.vpmu_lock);
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -417,6 +417,9 @@ typedef union {
#define MSR_SYSENTER_EIP 0x00000176
#define MSR_DEBUGCTL 0x000001d9
#define DEBUGCTL_BTF (1 << 1)
+#define MSR_BNDCFGS 0x00000d90
+#define BNDCFG_ENABLE (1 << 0)
+#define BNDCFG_PRESERVE (1 << 1)
#define MSR_EFER 0xc0000080
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
@@ -1295,6 +1298,7 @@ static bool vcpu_has(
#define vcpu_has_bmi1() vcpu_has( 7, EBX, 3, ctxt, ops)
#define vcpu_has_hle() vcpu_has( 7, EBX, 4, ctxt, ops)
#define vcpu_has_rtm() vcpu_has( 7, EBX, 11, ctxt, ops)
+#define vcpu_has_mpx() vcpu_has( 7, EBX, 14, ctxt, ops)
#define vcpu_must_have(feat) \
generate_exception_if(!vcpu_has_##feat(), EXC_UD)
@@ -1791,6 +1795,34 @@ static int inject_swint(enum x86_swint_t
generate_exception(fault_type, error_code);
}
+static void clear_bnd(struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops, enum vex_pfx pfx)
+{
+ uint64_t bndcfg;
+ int rc;
+
+ if ( pfx == vex_f2 || !vcpu_has_mpx() )
+ return;
+
+ if ( !mode_ring0() )
+ bndcfg = read_bndcfgu();
+ else if ( !ops->read_msr ||
+ ops->read_msr(MSR_BNDCFGS, &bndcfg, ctxt) != X86EMUL_OKAY )
+ return;
+ if ( (bndcfg & BNDCFG_ENABLE) && !(bndcfg & BNDCFG_PRESERVE) )
+ {
+ /*
+ * Using BNDMK or any other MPX instruction here is pointless, as
+ * we run with MPX disabled ourselves, and hence they're all no-ops.
+ * Therefore we have two ways to clear BNDn: Enable MPX temporarily
+ * (in which case executing any suitable non-prefixed branch
+ * instruction would do), or use XRSTOR.
+ */
+ xstate_set_init(XSTATE_BNDREGS);
+ }
+ done:;
+}
+
int x86emul_unhandleable_rw(
enum x86_segment seg,
unsigned long offset,
@@ -2975,6 +3007,7 @@ x86_emulate(
case 0x70 ... 0x7f: /* jcc (short) */
if ( test_cc(b, _regs.eflags) )
jmp_rel((int32_t)src.val);
+ clear_bnd(ctxt, ops, vex.pfx);
break;
case 0x82: /* Grp1 (x86/32 only) */
@@ -3340,6 +3373,7 @@ x86_emulate(
(rc = ops->insn_fetch(x86_seg_cs, dst.val, NULL, 0, ctxt)) )
goto done;
_regs.eip = dst.val;
+ clear_bnd(ctxt, ops, vex.pfx);
break;
case 0xc4: /* les */ {
@@ -4059,12 +4093,15 @@ x86_emulate(
op_bytes = ((op_bytes == 4) && mode_64bit()) ? 8 : op_bytes;
src.val = _regs.eip;
jmp_rel(rel);
+ clear_bnd(ctxt, ops, vex.pfx);
goto push;
}
case 0xe9: /* jmp (near) */
case 0xeb: /* jmp (short) */
jmp_rel((int32_t)src.val);
+ if ( !(b & 2) )
+ clear_bnd(ctxt, ops, vex.pfx);
break;
case 0xea: /* jmp (far, absolute) */
@@ -4323,12 +4360,14 @@ x86_emulate(
goto done;
_regs.eip = src.val;
src.val = dst.val;
+ clear_bnd(ctxt, ops, vex.pfx);
goto push;
case 4: /* jmp (near) */
if ( (rc = ops->insn_fetch(x86_seg_cs, src.val, NULL, 0, ctxt)) )
goto done;
_regs.eip = src.val;
dst.type = OP_NONE;
+ clear_bnd(ctxt, ops, vex.pfx);
break;
case 3: /* call (far, absolute indirect) */
case 5: /* jmp (far, absolute indirect) */
@@ -5047,6 +5086,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0x80) ... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */
if ( test_cc(b, _regs.eflags) )
jmp_rel((int32_t)src.val);
+ clear_bnd(ctxt, ops, vex.pfx);
break;
case X86EMUL_OPC(0x0f, 0x90) ... X86EMUL_OPC(0x0f, 0x9f): /* setcc */
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -496,15 +496,33 @@ bool_t xsave_enabled(const struct vcpu *
int xstate_alloc_save_area(struct vcpu *v)
{
struct xsave_struct *save_area;
+ unsigned int size;
- if ( !cpu_has_xsave || is_idle_vcpu(v) )
+ if ( !cpu_has_xsave )
return 0;
- BUG_ON(xsave_cntxt_size < XSTATE_AREA_MIN_SIZE);
+ if ( !is_idle_vcpu(v) || !cpu_has_xsavec )
+ {
+ size = xsave_cntxt_size;
+ BUG_ON(size < XSTATE_AREA_MIN_SIZE);
+ }
+ else
+ {
+ /*
+ * For idle vcpus on XSAVEC-capable CPUs allocate an area large
+ * enough to save any individual extended state.
+ */
+ unsigned int i;
+
+ for ( size = 0, i = 2; i < xstate_features; ++i )
+ if ( size < xstate_sizes[i] )
+ size = xstate_sizes[i];
+ size += XSTATE_AREA_MIN_SIZE;
+ }
/* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
BUILD_BUG_ON(__alignof(*save_area) < 64);
- save_area = _xzalloc(xsave_cntxt_size, __alignof(*save_area));
+ save_area = _xzalloc(size, __alignof(*save_area));
if ( save_area == NULL )
return -ENOMEM;
@@ -723,6 +741,66 @@ int handle_xsetbv(u32 index, u64 new_bv)
return 0;
}
+uint64_t read_bndcfgu(void)
+{
+ unsigned long cr0 = read_cr0();
+ struct xsave_struct *xstate
+ = idle_vcpu[smp_processor_id()]->arch.xsave_area;
+ const struct xstate_bndcsr *bndcsr;
+
+ ASSERT(cpu_has_mpx);
+ clts();
+
+ if ( cpu_has_xsavec )
+ {
+ asm ( ".byte 0x0f,0xc7,0x27\n" /* xsavec */
+ : "=m" (*xstate)
+ : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
+
+ bndcsr = (void *)(xstate + 1);
+ }
+ else
+ {
+ alternative_io(".byte 0x0f,0xae,0x27\n", /* xsave */
+ ".byte 0x0f,0xae,0x37\n", /* xsaveopt */
+ X86_FEATURE_XSAVEOPT,
+ "=m" (*xstate),
+ "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate));
+
+ bndcsr = (void *)xstate + xstate_offsets[_XSTATE_BNDCSR];
+ }
+
+ if ( cr0 & X86_CR0_TS )
+ write_cr0(cr0);
+
+ return xstate->xsave_hdr.xstate_bv & XSTATE_BNDCSR ? bndcsr->bndcfgu : 0;
+}
+
+void xstate_set_init(uint64_t mask)
+{
+ unsigned long cr0 = read_cr0();
+ unsigned long xcr0 = this_cpu(xcr0);
+ struct vcpu *v = idle_vcpu[smp_processor_id()];
+ struct xsave_struct *xstate = v->arch.xsave_area;
+
+ if ( ~xfeature_mask & mask )
+ return;
+
+ if ( (~xcr0 & mask) && !set_xcr0(xcr0 | mask) )
+ return;
+
+ clts();
+
+ memset(&xstate->xsave_hdr, 0, sizeof(xstate->xsave_hdr));
+ xrstor(v, mask);
+
+ if ( cr0 & X86_CR0_TS )
+ write_cr0(cr0);
+
+ if ( ~xcr0 & mask )
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0);
+}
+
/*
* Local variables:
* mode: C
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -99,13 +99,20 @@ struct __attribute__((aligned (64))) xsa
char data[]; /* Variable layout states */
};
+struct xstate_bndcsr {
+ uint64_t bndcfgu;
+ uint64_t bndstatus;
+};
+
/* extended state operations */
bool_t __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
void set_msr_xss(u64 xss);
uint64_t get_msr_xss(void);
+uint64_t read_bndcfgu(void);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
+void xstate_set_init(uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
int __must_check validate_xstate(u64 xcr0, u64 xcr0_accum,
const struct xsave_hdr *);
[PATCH 3/3] x86emul: some REX related polishing
From: Jan Beulich @ 2016-12-12 10:00 UTC
To: xen-devel; +Cc: Andrew Cooper
While there are a few cases where it seems better to open-code REX_*
values, there's one where this clearly is a bad idea. And the SYSEXIT
emulation has no need to look at REX at all; it can simply use op_bytes
instead.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
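
For context, the REX prefix byte (0x40-0x4f) carries four single-bit
register/size extensions in its low nibble; the masks below mirror the
architectural layout and the emulator's REX_* naming (shown for
reference only):

    #define REX_B 0x01  /* extends ModRM.rm, SIB.base, or opcode reg */
    #define REX_X 0x02  /* extends SIB.index */
    #define REX_R 0x04  /* extends ModRM.reg */
    #define REX_W 0x08  /* requests 64-bit operand size */

Hence the former "rex_prefix & 1" open-coded REX_B, and in the SYSEXIT
path testing op_bytes == 8 is equivalent to the old REX_W check, since
it is REX.W which yields an 8-byte operand size there.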
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3082,7 +3082,7 @@ x86_emulate(
case 0x90: /* nop / xchg %%r8,%%rax */
case X86EMUL_OPC_F3(0, 0x90): /* pause / xchg %%r8,%%rax */
- if ( !(rex_prefix & 1) )
+ if ( !(rex_prefix & REX_B) )
break; /* nop / pause */
/* fall through */
@@ -4897,7 +4897,6 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
{
uint64_t msr_content;
- bool user64 = rex_prefix & REX_W;
generate_exception_if(!mode_ring0(), EXC_GP, 0);
generate_exception_if(!in_protmode(ctxt, ops), EXC_GP, 0);
@@ -4907,16 +4906,17 @@ x86_emulate(
goto done;
generate_exception_if(!(msr_content & 0xfffc), EXC_GP, 0);
- generate_exception_if(user64 && (!is_canonical_address(_regs.edx) ||
- !is_canonical_address(_regs.ecx)),
+ generate_exception_if(op_bytes == 8 &&
+ (!is_canonical_address(_regs.edx) ||
+ !is_canonical_address(_regs.ecx)),
EXC_GP, 0);
cs.sel = (msr_content | 3) + /* SELECTOR_RPL_MASK */
- (user64 ? 32 : 16);
+ (op_bytes == 8 ? 32 : 16);
cs.base = 0; /* flat segment */
cs.limit = ~0u; /* 4GB limit */
- cs.attr.bytes = user64 ? 0xafb /* L+DB+P+DPL3+S+Code */
- : 0xcfb; /* G+DB+P+DPL3+S+Code */
+ cs.attr.bytes = op_bytes == 8 ? 0xafb /* L+DB+P+DPL3+S+Code */
+ : 0xcfb; /* G+DB+P+DPL3+S+Code */
sreg.sel = cs.sel + 8;
sreg.base = 0; /* flat segment */
@@ -4928,8 +4928,8 @@ x86_emulate(
(rc = ops->write_segment(x86_seg_ss, &sreg, ctxt)) != 0 )
goto done;
- _regs.eip = user64 ? _regs.edx : (uint32_t)_regs.edx;
- _regs.esp = user64 ? _regs.ecx : (uint32_t)_regs.ecx;
+ _regs.eip = op_bytes == 8 ? _regs.edx : (uint32_t)_regs.edx;
+ _regs.esp = op_bytes == 8 ? _regs.ecx : (uint32_t)_regs.ecx;
if ( _regs.eflags & EFLG_TF )
ctxt->retire.singlestep = true;