From: David Edmondson <david.edmondson@oracle.com>
To: qemu-devel@nongnu.org
Cc: Richard Henderson <richard.henderson@linaro.org>,
	Michael Roth <michael.roth@amd.com>,
	kvm@vger.kernel.org, Roman Bolshakov <r.bolshakov@yadro.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	babu.moger@amd.com, Cameron Esfahani <dirty@apple.com>,
	Eduardo Habkost <ehabkost@redhat.com>,
	David Edmondson <david.edmondson@oracle.com>
Subject: [RFC PATCH 7/8] target/i386: Populate x86_ext_save_areas offsets using cpuid where possible
Date: Mon,  5 Jul 2021 11:46:31 +0100	[thread overview]
Message-ID: <20210705104632.2902400-8-david.edmondson@oracle.com> (raw)
In-Reply-To: <20210705104632.2902400-1-david.edmondson@oracle.com>

Rather than relying on the X86XSaveArea structure definition, determine
the offsets of the XSAVE state areas from CPUID leaf 0xd where possible
(KVM and HVF).
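
For illustration, a minimal sketch (not part of this patch) of the CPUID
convention that the KVM and HVF hooks below rely on: sub-leaf i (i >= 2)
of leaf 0xd reports the size of XSAVE state component i in EAX and its
byte offset within a standard-format XSAVE area in EBX. The helper name
is hypothetical; the patch itself reads the same registers via
kvm_arch_get_supported_cpuid() and hvf_get_supported_cpuid().

    #include <stdint.h>
    #include <cpuid.h>   /* GCC/clang __cpuid_count() */

    /* CPUID.(EAX=0xd, ECX=component): EAX = size, EBX = offset. */
    static void xsave_component_layout(unsigned int component,
                                       uint32_t *size, uint32_t *offset)
    {
        uint32_t eax, ebx, ecx, edx;

        __cpuid_count(0xd, component, eax, ebx, ecx, edx);
        *size = eax;    /* size in bytes of this component's state */
        *offset = ebx;  /* offset from the start of the XSAVE area */
    }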

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
---
 target/i386/cpu.c         | 13 +------------
 target/i386/cpu.h         |  2 +-
 target/i386/hvf/hvf-cpu.c | 34 ++++++++++++++++++++++++++++++++++
 target/i386/kvm/kvm-cpu.c | 36 ++++++++++++++++++++++++++++++++++++
 target/i386/tcg/tcg-cpu.c | 20 ++++++++++++++++++++
 5 files changed, 92 insertions(+), 13 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 13caa0de50..5f595a0d7e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1304,48 +1304,37 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
 };
 #undef REGISTER
 
-const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
+ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
     [XSTATE_FP_BIT] = {
         /* x87 FP state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* x87 state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_SSE_BIT] = {
         /* SSE state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* SSE state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_YMM_BIT] =
           { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
-            .offset = offsetof(X86XSaveArea, avx_state),
             .size = sizeof(XSaveAVX) },
     [XSTATE_BNDREGS_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndreg_state),
             .size = sizeof(XSaveBNDREG)  },
     [XSTATE_BNDCSR_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndcsr_state),
             .size = sizeof(XSaveBNDCSR)  },
     [XSTATE_OPMASK_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, opmask_state),
             .size = sizeof(XSaveOpmask) },
     [XSTATE_ZMM_Hi256_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
             .size = sizeof(XSaveZMM_Hi256) },
     [XSTATE_Hi16_ZMM_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
             .size = sizeof(XSaveHi16_ZMM) },
     [XSTATE_PKRU_BIT] =
           { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
-            .offset = offsetof(X86XSaveArea, pkru_state),
             .size = sizeof(XSavePKRU) },
 };
 
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c9c0a34330..96b672f8bd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1377,7 +1377,7 @@ typedef struct ExtSaveArea {
 
 #define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)
 
-extern const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
 
 typedef enum TPRAccess {
     TPR_ACCESS_READ,
diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c
index 8fbc423888..3c7c44698f 100644
--- a/target/i386/hvf/hvf-cpu.c
+++ b/target/i386/hvf/hvf-cpu.c
@@ -30,6 +30,38 @@ static void hvf_cpu_max_instance_init(X86CPU *cpu)
         hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
 }
 
+static void hvf_cpu_xsave_init(void)
+{
+    int i;
+
+    /*
+     * The allocated storage must be large enough for all of the
+     * possible XSAVE state components.
+     */
+    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= 4096);
+
+    /* x87 state is in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    /* SSE state is in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = hvf_get_supported_cpuid(0xd, i, R_EAX);
+
+            if (sz != 0) {
+                assert(esa->size == sz);
+
+                esa->offset = hvf_get_supported_cpuid(0xd, i, R_EBX);
+                fprintf(stderr, "%s: state area %d: offset 0x%x, size 0x%x\n",
+                        __func__, i, esa->offset, esa->size);
+            }
+        }
+    }
+}
+
 static void hvf_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -42,6 +74,8 @@ static void hvf_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         hvf_cpu_max_instance_init(cpu);
     }
+
+    hvf_cpu_xsave_init();
 }
 
 static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index 00369c2000..f474cc5b83 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -122,6 +122,40 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu)
         kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
 }
 
+static void kvm_cpu_xsave_init(void)
+{
+    KVMState *s = kvm_state;
+    int i;
+
+    /*
+     * The allocated storage must be large enough for all of the
+     * possible XSAVE state components.
+     */
+    assert(sizeof(struct kvm_xsave) >=
+           kvm_arch_get_supported_cpuid(s, 0xd, 0, R_ECX));
+
+    /* x87 state is in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    /* SSE state is in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
+
+            if (sz != 0) {
+                assert(esa->size == sz);
+
+                esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
+                fprintf(stderr, "%s: state area %d: offset 0x%x, size 0x%x\n",
+                        __func__, i, esa->offset, esa->size);
+            }
+        }
+    }
+}
+
 static void kvm_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -141,6 +175,8 @@ static void kvm_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         kvm_cpu_max_instance_init(cpu);
     }
+
+    kvm_cpu_xsave_init();
 }
 
 static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 014ebea2f6..e96ec9bbcc 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -80,6 +80,24 @@ static void tcg_cpu_class_init(CPUClass *cc)
     cc->init_accel_cpu = tcg_cpu_init_ops;
 }
 
+static void tcg_cpu_xsave_init(void)
+{
+#define XO(bit, field) \
+    x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
+
+    XO(XSTATE_FP_BIT, legacy);
+    XO(XSTATE_SSE_BIT, legacy);
+    XO(XSTATE_YMM_BIT, avx_state);
+    XO(XSTATE_BNDREGS_BIT, bndreg_state);
+    XO(XSTATE_BNDCSR_BIT, bndcsr_state);
+    XO(XSTATE_OPMASK_BIT, opmask_state);
+    XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
+    XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
+    XO(XSTATE_PKRU_BIT, pkru_state);
+
+#undef XO
+}
+
 /*
  * TCG-specific defaults that override all CPU models when using TCG
  */
@@ -93,6 +111,8 @@ static void tcg_cpu_instance_init(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     /* Special cases not set in the X86CPUDefinition structs: */
     x86_cpu_apply_props(cpu, tcg_default_props);
+
+    tcg_cpu_xsave_init();
 }
 
 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
-- 
2.30.2

