* [PATCH v2 1/3] x86/emulate: add support for {, v}movq xmm, xmm/m64
@ 2016-07-18 14:30 Mihai Donțu
From: Mihai Donțu @ 2016-07-18 14:30 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Jan Beulich

Signed-off-by: Mihai Donțu <mdontu@bitdefender.com>
---
Changed since v1:
 * added a test for vmovq
 * made the tests depend on SSE and AVX, respectively
 * added emulator support for vmovq (opcode 0xd6 forces the operand size
   to 64 bits; see the sanity-check sketch below)
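
As a side note (not part of the patch): both forms store exactly one
quadword (the SDM encodes them as 66 0F D6 /r for movq xmm2/m64,xmm1 and
VEX.128.66.0F.WIG D6 /r for vmovq), which is why the emulator clamps
ea.bytes to 8 for this opcode. A minimal standalone check along these
lines (hypothetical, not part of the patch; needs an SSE2-capable host
and native execution) shows the 8-byte store directly:

    /* build natively, e.g. gcc -O2 -msse2 movq-check.c
     * (the file name is just an example) */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t buf[16];

        memset(buf, 0xbd, sizeof(buf));             /* poison pattern */
        asm volatile ( "pcmpgtb %%xmm0, %%xmm0\n\t" /* zero xmm0 */
                       "movq %%xmm0, (%0)"          /* 8-byte store */
                       :: "r" (buf) : "memory", "xmm0" );
        /* bytes 0-7 are cleared, bytes 8-15 keep the poison */
        printf("%02x %02x\n", buf[0], buf[8]);      /* expect: 00 bd */
        return 0;
    }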
---
 tools/tests/x86_emulator/test_x86_emulator.c | 44 ++++++++++++++++++++++++++++
 xen/arch/x86/x86_emulate/x86_emulate.c       |  9 +++---
 2 files changed, 49 insertions(+), 4 deletions(-)
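
For reference, the two new test cases can be exercised with the in-tree
harness, roughly along these lines (the exact invocation may differ
between trees):

    $ make -C tools/tests/x86_emulator
    $ ./tools/tests/x86_emulator/test_x86_emulator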

diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index c7f572a..8994149 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -697,6 +697,50 @@ int main(int argc, char **argv)
     else
         printf("skipped\n");
 
+    printf("%-40s", "Testing movq %%xmm0,32(%%eax)...");
+    if ( stack_exec && cpu_has_sse )
+    {
+        decl_insn(movq_to_mem2);
+
+        asm volatile ( "pcmpgtb %%xmm0, %%xmm0\n"
+                       put_insn(movq_to_mem2, "movq %%xmm0, 32(%%eax)")
+                       :: );
+
+        *((unsigned long *)res + 4) = 0xbdbdbdbdbdbdbdbd;
+        set_insn(movq_to_mem2);
+        regs.eax = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(movq_to_mem2) )
+            goto fail;
+        if ( *((unsigned long *)res + 4) )
+            goto fail;
+        printf("okay\n");
+    }
+    else
+        printf("skipped\n");
+
+    printf("%-40s", "Testing vmovq %%xmm1,32(%%eax)...");
+    if ( stack_exec && cpu_has_avx )
+    {
+        decl_insn(vmovq_to_mem);
+
+        asm volatile ( "pcmpgtb %%xmm1, %%xmm1\n"
+                       put_insn(vmovq_to_mem, "vmovq %%xmm1, 32(%%eax)")
+                       :: );
+
+        *((unsigned long *)res + 4) = 0xbdbdbdbdbdbdbdbd;
+        set_insn(vmovq_to_mem);
+        regs.eax = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vmovq_to_mem) )
+            goto fail;
+        if ( *((unsigned long *)res + 4) )
+            goto fail;
+        printf("okay\n");
+    }
+    else
+        printf("skipped\n");
+
     printf("%-40s", "Testing movdqu %xmm2,(%ecx)...");
     if ( stack_exec && cpu_has_sse2 )
     {
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index fe594ba..0301235 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -245,7 +245,7 @@ static uint8_t twobyte_table[256] = {
     ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
     ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
     /* 0xD0 - 0xDF */
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     /* 0xE0 - 0xEF */
     0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, 0, 0, 0, 0, 0, 0, 0, 0,
     /* 0xF0 - 0xFF */
@@ -4412,6 +4412,7 @@ x86_emulate(
     case 0x7f: /* movq mm,mm/m64 */
                /* {,v}movdq{a,u} xmm,xmm/m128 */
                /* vmovdq{a,u} ymm,ymm/m256 */
+    case 0xd6: /* {,v}movq xmm,xmm/m64 */
     {
         uint8_t *buf = get_stub(stub);
         struct fpu_insn_ctxt fic = { .insn_bytes = 5 };
@@ -4429,9 +4430,9 @@ x86_emulate(
             case vex_66:
             case vex_f3:
                 host_and_vcpu_must_have(sse2);
-                buf[0] = 0x66; /* movdqa */
+                buf[0] = 0x66; /* SSE */
                 get_fpu(X86EMUL_FPU_xmm, &fic);
-                ea.bytes = 16;
+                ea.bytes = (b == 0xd6 ? 8 : 16);
                 break;
             case vex_none:
                 if ( b != 0xe7 )
@@ -4451,7 +4452,7 @@ x86_emulate(
                     ((vex.pfx != vex_66) && (vex.pfx != vex_f3)));
             host_and_vcpu_must_have(avx);
             get_fpu(X86EMUL_FPU_ymm, &fic);
-            ea.bytes = 16 << vex.l;
+            ea.bytes = (b == 0xd6 ? 8 : 16 << vex.l);
         }
         if ( ea.type == OP_MEM )
         {
-- 
2.9.2


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
