* [PATCH] target/i386: VMRUN and VMLOAD canonicalizations
@ 2021-08-04 11:30 Lara Lazier
2021-08-06 14:06 ` Paolo Bonzini
0 siblings, 1 reply; 2+ messages in thread
From: Lara Lazier @ 2021-08-04 11:30 UTC (permalink / raw)
To: qemu-devel; +Cc: pbonzini, Lara Lazier
APM2 requires that VMRUN and VMLOAD canonicalize (sign extend to bit 63
from bit 47 or bit 56, for 48-bit and 57-bit virtual addresses
respectively) all base addresses in the segment registers that have
been loaded.
Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
target/i386/cpu.c | 19 +++++++++++--------
target/i386/cpu.h | 2 ++
target/i386/tcg/sysemu/svm_helper.c | 27 +++++++++++++++++----------
3 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 71d26cf1bd..de4c8316c9 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5108,6 +5108,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
}
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
+{
+ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
+ return 57; /* 57 bits virtual */
+ } else {
+ return 48; /* 48 bits virtual */
+ }
+}
+
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
@@ -5510,16 +5519,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
+ *eax = cpu->phys_bits;
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
/* 64 bit processor */
- *eax = cpu->phys_bits; /* configurable physical bits */
- if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
- *eax |= 0x00003900; /* 57 bits virtual */
- } else {
- *eax |= 0x00003000; /* 48 bits virtual */
- }
- } else {
- *eax = cpu->phys_bits;
+ *eax |= (cpu_x86_virtual_addr_width(env) << 8);
}
*ebx = env->features[FEAT_8000_0008_EBX];
if (cs->nr_cores * cs->nr_threads > 1) {
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 6c50d3ab4f..c9c7350c76 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1954,6 +1954,8 @@ typedef struct PropValue {
} PropValue;
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
+
/* cpu.c other functions (cpuid) */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 6c29a6a778..032561ef8c 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -41,6 +41,16 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
+/*
+ * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
+ * addresses in the segment registers that have been loaded.
+ */
+static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
+{
+ uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
+ *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
+}
+
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
@@ -53,6 +63,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+ svm_canonicalization(env, &sc->base);
}
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
@@ -256,16 +267,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
offsetof(struct vmcb, control.tsc_offset));
- env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.gdtr.base));
- env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.gdtr.limit));
-
- env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.idtr.base));
- env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.idtr.limit));
-
new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
if (new_cr0 & SVM_CR0_RESERVED_MASK) {
cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
@@ -319,6 +320,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
R_SS);
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
R_DS);
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
+ &env->idt);
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
+ &env->gdt);
env->eip = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rip));
@@ -456,6 +461,7 @@ void helper_vmload(CPUX86State *env, int aflag)
env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+ svm_canonicalization(env, &env->kernelgsbase);
#endif
env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
env->sysenter_cs = x86_ldq_phys(cs,
@@ -464,6 +470,7 @@ void helper_vmload(CPUX86State *env, int aflag)
save.sysenter_esp));
env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.sysenter_eip));
+
}
void helper_vmsave(CPUX86State *env, int aflag)
--
2.25.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] target/i386: VMRUN and VMLOAD canonicalizations
2021-08-04 11:30 [PATCH] target/i386: VMRUN and VMLOAD canonicalizations Lara Lazier
@ 2021-08-06 14:06 ` Paolo Bonzini
0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2021-08-06 14:06 UTC (permalink / raw)
To: Lara Lazier, qemu-devel
On 04/08/21 13:30, Lara Lazier wrote:
> APM2 requires that VMRUN and VMLOAD canonicalize (sign extend to bit 63
> from bit 47 or bit 56, for 48-bit and 57-bit virtual addresses
> respectively) all base addresses in the segment registers that have
> been loaded.
>
> Signed-off-by: Lara Lazier <laramglazier@gmail.com>
> ---
> target/i386/cpu.c | 19 +++++++++++--------
> target/i386/cpu.h | 2 ++
> target/i386/tcg/sysemu/svm_helper.c | 27 +++++++++++++++++----------
> 3 files changed, 30 insertions(+), 18 deletions(-)
>
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 71d26cf1bd..de4c8316c9 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -5108,6 +5108,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
>
> }
>
> +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
> +{
> + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
> + return 57; /* 57 bits virtual */
> + } else {
> + return 48; /* 48 bits virtual */
> + }
> +}
> +
> void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> uint32_t *eax, uint32_t *ebx,
> uint32_t *ecx, uint32_t *edx)
> @@ -5510,16 +5519,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> break;
> case 0x80000008:
> /* virtual & phys address size in low 2 bytes. */
> + *eax = cpu->phys_bits;
> if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
> /* 64 bit processor */
> - *eax = cpu->phys_bits; /* configurable physical bits */
> - if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
> - *eax |= 0x00003900; /* 57 bits virtual */
> - } else {
> - *eax |= 0x00003000; /* 48 bits virtual */
> - }
> - } else {
> - *eax = cpu->phys_bits;
> + *eax |= (cpu_x86_virtual_addr_width(env) << 8);
> }
> *ebx = env->features[FEAT_8000_0008_EBX];
> if (cs->nr_cores * cs->nr_threads > 1) {
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index 6c50d3ab4f..c9c7350c76 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -1954,6 +1954,8 @@ typedef struct PropValue {
> } PropValue;
> void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
>
> +uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
> +
> /* cpu.c other functions (cpuid) */
> void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> uint32_t *eax, uint32_t *ebx,
> diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
> index 6c29a6a778..032561ef8c 100644
> --- a/target/i386/tcg/sysemu/svm_helper.c
> +++ b/target/i386/tcg/sysemu/svm_helper.c
> @@ -41,6 +41,16 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
> ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
> }
>
> +/*
> + * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
> + * addresses in the segment registers that have been loaded.
> + */
> +static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
> +{
> + uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
> + *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
> +}
> +
> static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
> SegmentCache *sc)
> {
> @@ -53,6 +63,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
> sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
> flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
> sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
> + svm_canonicalization(env, &sc->base);
> }
>
> static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
> @@ -256,16 +267,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
> env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
> offsetof(struct vmcb, control.tsc_offset));
>
> - env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.gdtr.base));
> - env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.gdtr.limit));
> -
> - env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.idtr.base));
> - env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
> - save.idtr.limit));
> -
> new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
> if (new_cr0 & SVM_CR0_RESERVED_MASK) {
> cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
> @@ -319,6 +320,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
> R_SS);
> svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
> R_DS);
> + svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
> + &env->idt);
> + svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
> + &env->gdt);
>
> env->eip = x86_ldq_phys(cs,
> env->vm_vmcb + offsetof(struct vmcb, save.rip));
> @@ -456,6 +461,7 @@ void helper_vmload(CPUX86State *env, int aflag)
> env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
> env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
> env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
> + svm_canonicalization(env, &env->kernelgsbase);
> #endif
> env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
> env->sysenter_cs = x86_ldq_phys(cs,
> @@ -464,6 +470,7 @@ void helper_vmload(CPUX86State *env, int aflag)
> save.sysenter_esp));
> env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
> save.sysenter_eip));
> +
> }
>
> void helper_vmsave(CPUX86State *env, int aflag)
>
Queued, thanks.
Paolo
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2021-08-06 14:07 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-04 11:30 [PATCH] target/i386: VMRUN and VMLOAD canonicalizations Lara Lazier
2021-08-06 14:06 ` Paolo Bonzini
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.