* [PATCH] x86emul: honor MXCSR.MM
@ 2016-10-13 12:57 Jan Beulich
2016-10-13 13:26 ` Andrew Cooper
0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2016-10-13 12:57 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper
[-- Attachment #1: Type: text/plain, Size: 2305 bytes --]
Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
operands") didn't consider a specific AMD mode: Mis-alignment #GP
faults can be masked on some of their hardware.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -446,6 +446,9 @@ typedef union {
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
+/* MXCSR bit definitions. */
+#define MXCSR_MM (1U << 17)
+
/* Exception definitions. */
#define EXC_DE 0
#define EXC_DB 1
@@ -1253,6 +1256,7 @@ static bool_t vcpu_has(
#define vcpu_has_clflush() vcpu_has( 1, EDX, 19, ctxt, ops)
#define vcpu_has_lzcnt() vcpu_has(0x80000001, ECX, 5, ctxt, ops)
+#define vcpu_has_misalignsse() vcpu_has(0x80000001, ECX, 7, ctxt, ops)
#define vcpu_has_bmi1() vcpu_has(0x00000007, EBX, 3, ctxt, ops)
#define vcpu_has_hle() vcpu_has(0x00000007, EBX, 4, ctxt, ops)
#define vcpu_has_rtm() vcpu_has(0x00000007, EBX, 11, ctxt, ops)
@@ -4675,7 +4679,13 @@ x86_emulate(
ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
if ( ea.type == OP_MEM )
{
- generate_exception_if((b >= 0x28) &&
+ uint32_t mxcsr = 0;
+
+ if ( b < 0x28 )
+ mxcsr = MXCSR_MM;
+ else if ( vcpu_has_misalignsse() )
+ asm ( "stmxcsr %0" : "=m" (mxcsr) );
+ generate_exception_if(!(mxcsr & MXCSR_MM) &&
!is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
ctxt, ops),
EXC_GP, 0);
@@ -4955,7 +4965,13 @@ x86_emulate(
}
if ( ea.type == OP_MEM )
{
- generate_exception_if((vex.pfx == vex_66) &&
+ uint32_t mxcsr = 0;
+
+ if ( vex.pfx != vex_66 )
+ mxcsr = MXCSR_MM;
+ else if ( vcpu_has_misalignsse() )
+ asm ( "stmxcsr %0" : "=m" (mxcsr) );
+ generate_exception_if(!(mxcsr & MXCSR_MM) &&
!is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
ctxt, ops),
EXC_GP, 0);
[-- Attachment #2: x86emul-misaligned-SSE.patch --]
[-- Type: text/plain, Size: 2326 bytes --]
x86emul: honor MXCSR.MM
Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
operands") didn't consider a specific AMD mode: Mis-alignment #GP
faults can be masked on some of their hardware.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -446,6 +446,9 @@ typedef union {
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
+/* MXCSR bit definitions. */
+#define MXCSR_MM (1U << 17)
+
/* Exception definitions. */
#define EXC_DE 0
#define EXC_DB 1
@@ -1253,6 +1256,7 @@ static bool_t vcpu_has(
#define vcpu_has_clflush() vcpu_has( 1, EDX, 19, ctxt, ops)
#define vcpu_has_lzcnt() vcpu_has(0x80000001, ECX, 5, ctxt, ops)
+#define vcpu_has_misalignsse() vcpu_has(0x80000001, ECX, 7, ctxt, ops)
#define vcpu_has_bmi1() vcpu_has(0x00000007, EBX, 3, ctxt, ops)
#define vcpu_has_hle() vcpu_has(0x00000007, EBX, 4, ctxt, ops)
#define vcpu_has_rtm() vcpu_has(0x00000007, EBX, 11, ctxt, ops)
@@ -4675,7 +4679,13 @@ x86_emulate(
ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
if ( ea.type == OP_MEM )
{
- generate_exception_if((b >= 0x28) &&
+ uint32_t mxcsr = 0;
+
+ if ( b < 0x28 )
+ mxcsr = MXCSR_MM;
+ else if ( vcpu_has_misalignsse() )
+ asm ( "stmxcsr %0" : "=m" (mxcsr) );
+ generate_exception_if(!(mxcsr & MXCSR_MM) &&
!is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
ctxt, ops),
EXC_GP, 0);
@@ -4955,7 +4965,13 @@ x86_emulate(
}
if ( ea.type == OP_MEM )
{
- generate_exception_if((vex.pfx == vex_66) &&
+ uint32_t mxcsr = 0;
+
+ if ( vex.pfx != vex_66 )
+ mxcsr = MXCSR_MM;
+ else if ( vcpu_has_misalignsse() )
+ asm ( "stmxcsr %0" : "=m" (mxcsr) );
+ generate_exception_if(!(mxcsr & MXCSR_MM) &&
!is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
ctxt, ops),
EXC_GP, 0);
[-- Attachment #3: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86emul: honor MXCSR.MM
2016-10-13 12:57 [PATCH] x86emul: honor MXCSR.MM Jan Beulich
@ 2016-10-13 13:26 ` Andrew Cooper
2016-10-14 6:20 ` Jan Beulich
0 siblings, 1 reply; 4+ messages in thread
From: Andrew Cooper @ 2016-10-13 13:26 UTC (permalink / raw)
To: Jan Beulich, xen-devel
On 13/10/16 13:57, Jan Beulich wrote:
> Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
> operands") didn't consider a specific AMD mode: Mis-alignment #GP
> faults can be masked on some of their hardware.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
This highlights that the following CPUID dependency change is also required
diff --git a/xen/tools/gen-cpuid.py b/xen/tools/gen-cpuid.py
index 33e68eb..e803654 100755
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -185,8 +185,9 @@ def crunch_numbers(state):
# the first place.
APIC: [X2APIC],
- # AMD built MMXExtentions and 3DNow as extentions to MMX.
- MMX: [MMXEXT, _3DNOW],
+ # AMD built MMXExtentions, 3DNow and SSE Misalignment as extensions to
+ # MMX.
+ MMX: [MMXEXT, _3DNOW, MISALIGNSSE],
# The FXSAVE/FXRSTOR instructions were introduced into hardware before
# SSE, which is why they behave differently based on %CR4.OSFXSAVE and
>
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -446,6 +446,9 @@ typedef union {
> #define EFLG_PF (1<<2)
> #define EFLG_CF (1<<0)
>
> +/* MXCSR bit definitions. */
> +#define MXCSR_MM (1U << 17)
> +
> /* Exception definitions. */
> #define EXC_DE 0
> #define EXC_DB 1
> @@ -1253,6 +1256,7 @@ static bool_t vcpu_has(
>
> #define vcpu_has_clflush() vcpu_has( 1, EDX, 19, ctxt, ops)
> #define vcpu_has_lzcnt() vcpu_has(0x80000001, ECX, 5, ctxt, ops)
> +#define vcpu_has_misalignsse() vcpu_has(0x80000001, ECX, 7, ctxt, ops)
> #define vcpu_has_bmi1() vcpu_has(0x00000007, EBX, 3, ctxt, ops)
> #define vcpu_has_hle() vcpu_has(0x00000007, EBX, 4, ctxt, ops)
> #define vcpu_has_rtm() vcpu_has(0x00000007, EBX, 11, ctxt, ops)
> @@ -4675,7 +4679,13 @@ x86_emulate(
> ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
> if ( ea.type == OP_MEM )
> {
> - generate_exception_if((b >= 0x28) &&
> + uint32_t mxcsr = 0;
> +
> + if ( b < 0x28 )
> + mxcsr = MXCSR_MM;
> + else if ( vcpu_has_misalignsse() )
> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
> ctxt, ops),
> EXC_GP, 0);
> @@ -4955,7 +4965,13 @@ x86_emulate(
> }
> if ( ea.type == OP_MEM )
> {
> - generate_exception_if((vex.pfx == vex_66) &&
> + uint32_t mxcsr = 0;
> +
> + if ( vex.pfx != vex_66 )
> + mxcsr = MXCSR_MM;
> + else if ( vcpu_has_misalignsse() )
> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
> ctxt, ops),
> EXC_GP, 0);
According to the docs, we should also be possibly raising #AC here.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] x86emul: honor MXCSR.MM
2016-10-13 13:26 ` Andrew Cooper
@ 2016-10-14 6:20 ` Jan Beulich
2016-10-14 11:55 ` Andrew Cooper
0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2016-10-14 6:20 UTC (permalink / raw)
To: Andrew Cooper; +Cc: xen-devel
>>> On 13.10.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
> On 13/10/16 13:57, Jan Beulich wrote:
>> Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
>> operands") didn't consider a specific AMD mode: Mis-alignment #GP
>> faults can be masked on some of their hardware.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> This highlights that the following CPUID dependency change is also required
>
> diff --git a/xen/tools/gen-cpuid.py b/xen/tools/gen-cpuid.py
> index 33e68eb..e803654 100755
> --- a/xen/tools/gen-cpuid.py
> +++ b/xen/tools/gen-cpuid.py
> @@ -185,8 +185,9 @@ def crunch_numbers(state):
> # the first place.
> APIC: [X2APIC],
>
> - # AMD built MMXExtentions and 3DNow as extentions to MMX.
> - MMX: [MMXEXT, _3DNOW],
> + # AMD built MMXExtentions, 3DNow and SSE Misalignment as extensions to
> + # MMX.
> + MMX: [MMXEXT, _3DNOW, MISALIGNSSE],
>
> # The FXSAVE/FXRSTOR instructions were introduced into hardware before
> # SSE, which is why they behave differently based on %CR4.OSFXSAVE and
Hmm, for one this is an orthogonal change, so doesn't belong in this
patch. And then - why is this an extension to MMX (which doesn't
have any alignment requirements anyway) rather than SSE?
>> @@ -4675,7 +4679,13 @@ x86_emulate(
>> ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
>> if ( ea.type == OP_MEM )
>> {
>> - generate_exception_if((b >= 0x28) &&
>> + uint32_t mxcsr = 0;
>> +
>> + if ( b < 0x28 )
>> + mxcsr = MXCSR_MM;
>> + else if ( vcpu_has_misalignsse() )
>> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
>> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
>> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>> ctxt, ops),
>> EXC_GP, 0);
>> @@ -4955,7 +4965,13 @@ x86_emulate(
>> }
>> if ( ea.type == OP_MEM )
>> {
>> - generate_exception_if((vex.pfx == vex_66) &&
>> + uint32_t mxcsr = 0;
>> +
>> + if ( vex.pfx != vex_66 )
>> + mxcsr = MXCSR_MM;
>> + else if ( vcpu_has_misalignsse() )
>> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
>> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
>> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>> ctxt, ops),
>> EXC_GP, 0);
>
> According to the docs, we should also be possibly raising #AC here.
Well, you've said the same in a different context not so long ago,
and my answer is then also the same: As long as we don't do any
#AC generation, I see no reason why we would want to create an
exception here.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86emul: honor MXCSR.MM
2016-10-14 6:20 ` Jan Beulich
@ 2016-10-14 11:55 ` Andrew Cooper
0 siblings, 0 replies; 4+ messages in thread
From: Andrew Cooper @ 2016-10-14 11:55 UTC (permalink / raw)
To: Jan Beulich; +Cc: xen-devel
On 14/10/16 07:20, Jan Beulich wrote:
>>>> On 13.10.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
>> On 13/10/16 13:57, Jan Beulich wrote:
>>> Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
>>> operands") didn't consider a specific AMD mode: Mis-alignment #GP
>>> faults can be masked on some of their hardware.
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> This highlights that the following CPUID dependency change is also required
>>
>> diff --git a/xen/tools/gen-cpuid.py b/xen/tools/gen-cpuid.py
>> index 33e68eb..e803654 100755
>> --- a/xen/tools/gen-cpuid.py
>> +++ b/xen/tools/gen-cpuid.py
>> @@ -185,8 +185,9 @@ def crunch_numbers(state):
>> # the first place.
>> APIC: [X2APIC],
>>
>> - # AMD built MMXExtentions and 3DNow as extentions to MMX.
>> - MMX: [MMXEXT, _3DNOW],
>> + # AMD built MMXExtentions, 3DNow and SSE Misalignment as
>> extensions to
>> + # MMX.
>> + MMX: [MMXEXT, _3DNOW, MISALIGNSSE],
>>
>> # The FXSAVE/FXRSTOR instructions were introduced into hardware before
>> # SSE, which is why they behave differently based on %CR4.OSFXSAVE and
> Hmm, for one this is an orthogonal change, so doesn't belong in this
> patch. And then - why is this an extension to MMX (which doesn't
> have any alignment requirements anyway) rather than SSE?
Sorry - I have no idea why I suggested MMX. It should have been SSE.
>
>>> @@ -4675,7 +4679,13 @@ x86_emulate(
>>> ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
>>> if ( ea.type == OP_MEM )
>>> {
>>> - generate_exception_if((b >= 0x28) &&
>>> + uint32_t mxcsr = 0;
>>> +
>>> + if ( b < 0x28 )
>>> + mxcsr = MXCSR_MM;
>>> + else if ( vcpu_has_misalignsse() )
>>> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
>>> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>> ctxt, ops),
>>> EXC_GP, 0);
>>> @@ -4955,7 +4965,13 @@ x86_emulate(
>>> }
>>> if ( ea.type == OP_MEM )
>>> {
>>> - generate_exception_if((vex.pfx == vex_66) &&
>>> + uint32_t mxcsr = 0;
>>> +
>>> + if ( vex.pfx != vex_66 )
>>> + mxcsr = MXCSR_MM;
>>> + else if ( vcpu_has_misalignsse() )
>>> + asm ( "stmxcsr %0" : "=m" (mxcsr) );
>>> + generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>> !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>> ctxt, ops),
>>> EXC_GP, 0);
>> According to the docs, we should also be possibly raising #AC here.
> Well, you've said the same in a different context not so long ago,
> and my answer is then also the same: As long as we don't do any
> #AC generation, I see no reason why we would want to create an
> exception here.
Ah yes. I had managed to let that slip my mind.
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2016-10-14 11:55 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-10-13 12:57 [PATCH] x86emul: honor MXCSR.MM Jan Beulich
2016-10-13 13:26 ` Andrew Cooper
2016-10-14 6:20 ` Jan Beulich
2016-10-14 11:55 ` Andrew Cooper
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.