From: David Edmondson <david.edmondson@oracle.com>
To: qemu-devel@nongnu.org
Cc: Eduardo Habkost <ehabkost@redhat.com>,
	kvm@vger.kernel.org, Marcelo Tosatti <mtosatti@redhat.com>,
	Richard Henderson <richard.henderson@linaro.org>,
	David Edmondson <david.edmondson@oracle.com>,
	Babu Moger <babu.moger@amd.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [RFC PATCH 7/7] target/i386: Manipulate only AMD XSAVE state on AMD
Date: Thu, 20 May 2021 15:56:47 +0100
Message-ID: <20210520145647.3483809-8-david.edmondson@oracle.com>
In-Reply-To: <20210520145647.3483809-1-david.edmondson@oracle.com>

On AMD CPUs, save and load only the XSAVE state relevant to AMD,
using the AMD-specific layout (in particular the PKRU state offset).
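
This relies on the vendor-specific X86XSaveArea layout introduced by
patches 2/7 and 5/7. A minimal sketch of the assumed layout and the
XO() offset macro (illustrative only: padding is made explicit here
and QEMU's real component types are reduced to byte arrays):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct X86XSaveArea {
        uint8_t legacy[0x200];       /* x87/SSE/XMM, offset 0x000 */
        uint8_t header[0x40];        /* XSTATE_BV etc., offset 0x200 */
        uint8_t avx_state[0x100];    /* YMM high halves, offset 0x240 */
        union {
            struct {
                uint8_t pad[0x3c0 - 0x340];      /* reserved gap */
                uint8_t bndreg_state[0x40];      /* MPX, 0x3c0 */
                uint8_t bndcsr_state[0x40];      /* MPX, 0x400 */
                uint8_t opmask_state[0x40];      /* AVX-512 k0-k7, 0x440 */
                uint8_t zmm_hi256_state[0x200];  /* AVX-512, 0x480 */
                uint8_t hi16_zmm_state[0x400];   /* AVX-512, 0x680 */
                uint8_t pkru_state[0x8];         /* PKRU, 0xa80 */
            } intel;
            struct {
                uint8_t pad[0x980 - 0x340];      /* no MPX/AVX-512 state */
                uint8_t pkru_state[0x8];         /* PKRU, 0x980 */
            } amd;
        };
    } X86XSaveArea;

    /* XO() turns a member designator into its byte offset in the area. */
    #define XO(X) offsetof(X86XSaveArea, X)

    _Static_assert(XO(intel.pkru_state) == 0xa80, "Intel PKRU offset");
    _Static_assert(XO(amd.pkru_state) == 0x980, "AMD PKRU offset");

The Intel offsets match what Intel hardware reports in CPUID leaf 0xd;
the AMD PKRU offset follows patch 6/7, which adjusts leaf 0xd to report
0x980 as AMD hardware does.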

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
---
 target/i386/tcg/fpu_helper.c | 12 +++++--
 target/i386/xsave_helper.c   | 70 ++++++++++++++++++++++--------------
 2 files changed, 54 insertions(+), 28 deletions(-)
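
The IS_AMD_CPU() checks below use QEMU's existing vendor predicate. A
sketch of the assumed definition (the real macro lives in
target/i386/cpu.h; shown here only to make the vendor split explicit):

    /* True when the guest's CPUID vendor string is "AuthenticAMD". */
    #define IS_AMD_CPU(env)                              \
        ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&   \
         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&   \
         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)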

diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index fba2de5b04..f1d4704b34 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -2643,7 +2643,11 @@ static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
         do_xsave_bndcsr(env, ptr + XO(intel.bndcsr_state), ra);
     }
     if (opt & XSTATE_PKRU_MASK) {
-        do_xsave_pkru(env, ptr + XO(intel.pkru_state), ra);
+        if (IS_AMD_CPU(env)) {
+            do_xsave_pkru(env, ptr + XO(amd.pkru_state), ra);
+        } else {
+            do_xsave_pkru(env, ptr + XO(intel.pkru_state), ra);
+        }
     }
 
     /* Update the XSTATE_BV field.  */
@@ -2854,7 +2858,11 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
     if (rfbm & XSTATE_PKRU_MASK) {
         uint64_t old_pkru = env->pkru;
         if (xstate_bv & XSTATE_PKRU_MASK) {
-            do_xrstor_pkru(env, ptr + XO(intel.pkru_state), ra);
+            if (IS_AMD_CPU(env)) {
+                do_xrstor_pkru(env, ptr + XO(amd.pkru_state), ra);
+            } else {
+                do_xrstor_pkru(env, ptr + XO(intel.pkru_state), ra);
+            }
         } else {
             env->pkru = 0;
         }
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index 97dbab85d1..6b4501cf29 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -10,6 +10,7 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
 {
     CPUX86State *env = &cpu->env;
     X86XSaveArea *xsave = buf;
+    const bool is_amd = IS_AMD_CPU(env);
 
     uint16_t cwd, swd, twd;
     int i;
@@ -31,30 +32,38 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
             sizeof env->fpregs);
     xsave->legacy.mxcsr = env->mxcsr;
     xsave->header.xstate_bv = env->xstate_bv;
-    memcpy(&xsave->intel.bndreg_state.bnd_regs, env->bnd_regs,
-            sizeof env->bnd_regs);
-    xsave->intel.bndcsr_state.bndcsr = env->bndcs_regs;
-    memcpy(&xsave->intel.opmask_state.opmask_regs, env->opmask_regs,
-            sizeof env->opmask_regs);
+    if (!is_amd) {
+        memcpy(&xsave->intel.bndreg_state.bnd_regs, env->bnd_regs,
+               sizeof env->bnd_regs);
+        xsave->intel.bndcsr_state.bndcsr = env->bndcs_regs;
+        memcpy(&xsave->intel.opmask_state.opmask_regs, env->opmask_regs,
+               sizeof env->opmask_regs);
+    }
 
     for (i = 0; i < CPU_NB_REGS; i++) {
         uint8_t *xmm = xsave->legacy.xmm_regs[i];
         uint8_t *ymmh = xsave->avx_state.ymmh[i];
-        uint8_t *zmmh = xsave->intel.zmm_hi256_state.zmm_hi256[i];
         stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
         stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
         stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
         stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
-        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
-        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
-        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
-        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+        if (!is_amd) {
+            uint8_t *zmmh = xsave->intel.zmm_hi256_state.zmm_hi256[i];
+            stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
+            stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
+            stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+            stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+        }
     }
 
 #ifdef TARGET_X86_64
-    memcpy(&xsave->intel.hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
-            16 * sizeof env->xmm_regs[16]);
-    memcpy(&xsave->intel.pkru_state, &env->pkru, sizeof env->pkru);
+    if (is_amd) {
+        memcpy(&xsave->amd.pkru_state, &env->pkru, sizeof env->pkru);
+    } else {
+        memcpy(&xsave->intel.hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
+               16 * sizeof env->xmm_regs[16]);
+        memcpy(&xsave->intel.pkru_state, &env->pkru, sizeof env->pkru);
+    }
 #endif
 
 }
@@ -64,6 +73,7 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
 
     CPUX86State *env = &cpu->env;
     const X86XSaveArea *xsave = buf;
+    const bool is_amd = IS_AMD_CPU(env);
 
     int i;
     uint16_t cwd, swd, twd;
@@ -83,30 +93,38 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
     memcpy(env->fpregs, &xsave->legacy.fpregs,
             sizeof env->fpregs);
     env->xstate_bv = xsave->header.xstate_bv;
-    memcpy(env->bnd_regs, &xsave->intel.bndreg_state.bnd_regs,
-            sizeof env->bnd_regs);
-    env->bndcs_regs = xsave->intel.bndcsr_state.bndcsr;
-    memcpy(env->opmask_regs, &xsave->intel.opmask_state.opmask_regs,
-            sizeof env->opmask_regs);
+    if (!is_amd) {
+        memcpy(env->bnd_regs, &xsave->intel.bndreg_state.bnd_regs,
+               sizeof env->bnd_regs);
+        env->bndcs_regs = xsave->intel.bndcsr_state.bndcsr;
+        memcpy(env->opmask_regs, &xsave->intel.opmask_state.opmask_regs,
+               sizeof env->opmask_regs);
+    }
 
     for (i = 0; i < CPU_NB_REGS; i++) {
         const uint8_t *xmm = xsave->legacy.xmm_regs[i];
         const uint8_t *ymmh = xsave->avx_state.ymmh[i];
-        const uint8_t *zmmh = xsave->intel.zmm_hi256_state.zmm_hi256[i];
         env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
         env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
         env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
         env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
-        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
-        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
-        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
-        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+        if (!is_amd) {
+            const uint8_t *zmmh = xsave->intel.zmm_hi256_state.zmm_hi256[i];
+            env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+            env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+            env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+            env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+        }
     }
 
 #ifdef TARGET_X86_64
-    memcpy(&env->xmm_regs[16], &xsave->intel.hi16_zmm_state.hi16_zmm,
-           16 * sizeof env->xmm_regs[16]);
-    memcpy(&env->pkru, &xsave->intel.pkru_state, sizeof env->pkru);
+    if (is_amd) {
+        memcpy(&env->pkru, &xsave->amd.pkru_state, sizeof env->pkru);
+    } else {
+        memcpy(&env->xmm_regs[16], &xsave->intel.hi16_zmm_state.hi16_zmm,
+               16 * sizeof env->xmm_regs[16]);
+        memcpy(&env->pkru, &xsave->intel.pkru_state, sizeof env->pkru);
+    }
 #endif
 
 }
-- 
2.30.2



Thread overview: 14+ messages
2021-05-20 14:56 [RFC PATCH 0/7] Support protection keys in an AMD EPYC-Milan VM David Edmondson
2021-05-20 14:56 ` [RFC PATCH 1/7] target/i386: Declare constants for XSAVE offsets David Edmondson
2021-05-20 14:56 ` [RFC PATCH 2/7] target/i386: Use constants for XSAVE offsets David Edmondson
2021-05-20 14:56 ` [RFC PATCH 3/7] target/i386: Clarify the padding requirements of X86XSaveArea David Edmondson
2021-05-20 14:56 ` [RFC PATCH 4/7] target/i386: Prepare for per-vendor X86XSaveArea layout David Edmondson
2021-05-20 14:56 ` [RFC PATCH 5/7] target/i386: Introduce AMD X86XSaveArea sub-union David Edmondson
2021-05-20 14:56 ` [RFC PATCH 6/7] target/i386: Adjust AMD XSAVE PKRU area offset in CPUID leaf 0xd David Edmondson
2021-05-20 14:56 ` [RFC PATCH 7/7] target/i386: Manipulate only AMD XSAVE state on AMD David Edmondson [this message]
2021-05-20 15:15 ` [RFC PATCH 0/7] Support protection keys in an AMD EPYC-Milan VM no-reply
2021-06-08  8:24 ` David Edmondson
2021-07-01 21:24   ` Babu Moger
2021-07-01 21:32     ` David Edmondson
2021-06-11 16:01 ` Paolo Bonzini
2021-06-14 16:21   ` David Edmondson
