qemu-devel.nongnu.org archive mirror
* [PATCH] target/i386: Added V_INTR_PRIO check to virtual interrupts
@ 2021-07-21 15:26 Lara Lazier
From: Lara Lazier @ 2021-07-21 15:26 UTC
  To: qemu-devel; +Cc: Lara Lazier

The APM2 states that the processor takes a virtual INTR interrupt
if V_IRQ and V_INTR_PRIO indicate that a virtual interrupt is pending
whose priority is greater than the value in V_TPR.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/tcg/sysemu/svm_helper.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 2e66b05729..7ce85d1515 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -118,6 +118,16 @@ static inline void svm_vmrun_canonicalization(CPUX86State *env)
     env->tr.base = (long) ((uint32_t) env->tr.base);
 }
 
+static inline bool ctl_has_irq(uint32_t int_ctl)
+{
+    uint32_t int_prio;
+    uint32_t tpr;
+
+    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_MASKING_SHIFT;
+    tpr = int_ctl & V_TPR_MASK;
+    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+}
+
 static inline bool virtual_gif_enabled(CPUX86State *env, uint32_t int_ctl)
 {
     return (int_ctl & V_GIF_ENABLED_MASK) && (env->features[FEAT_SVM] & CPUID_SVM_VGIF);
@@ -363,7 +373,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     } else {
         env->hflags2 |= HF2_GIF_MASK;
     }
-    if (int_ctl & V_IRQ_MASK) {
+    if (ctl_has_irq(int_ctl)) {
         CPUState *cs = env_cpu(env);
 
         cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
-- 
2.25.1
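
A standalone sketch of the decode, using the architectural int_ctl
layout from the APM (V_TPR in bits 3:0, V_IRQ in bit 8, V_INTR_PRIO in
bits 19:16); the mask and shift names mirror target/i386/svm.h. Note
that the helper above shifts by V_INTR_MASKING_SHIFT (bit 24) rather
than V_INTR_PRIO_SHIFT, which is flagged downthread: with that shift
int_prio is always zero and the check degenerates to "take the
interrupt only when V_TPR == 0".

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define V_TPR_MASK        0x0f
    #define V_IRQ_MASK        (1u << 8)
    #define V_INTR_PRIO_SHIFT 16
    #define V_INTR_PRIO_MASK  (0x0fu << V_INTR_PRIO_SHIFT)

    static bool ctl_has_irq_sketch(uint32_t int_ctl)
    {
        /* Priority of the pending virtual interrupt (bits 19:16). */
        uint32_t int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
        /* Virtual task-priority register (bits 3:0). */
        uint32_t tpr = int_ctl & V_TPR_MASK;

        return (int_ctl & V_IRQ_MASK) && int_prio >= tpr;
    }

    int main(void)
    {
        /* V_IRQ set, priority 5, V_TPR 3: taken (prints 1). */
        printf("%d\n", ctl_has_irq_sketch(V_IRQ_MASK | (5u << V_INTR_PRIO_SHIFT) | 3));
        /* V_IRQ set, priority 2, V_TPR 3: masked (prints 0). */
        printf("%d\n", ctl_has_irq_sketch(V_IRQ_MASK | (2u << V_INTR_PRIO_SHIFT) | 3));
        return 0;
    }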




* [PATCH v2] target/i386: Added consistency checks for CR4
@ 2021-07-21 15:26 Lara Lazier
From: Lara Lazier @ 2021-07-21 15:26 UTC
  To: qemu-devel; +Cc: Lara Lazier

All MBZ bits in CR4 must be zero (APM2 15.5).
Added a reserved-bit mask and checks in both
helper_vmrun and helper_write_crN.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/cpu.h                    | 31 ++++++++++++++++++++++++++++
 target/i386/tcg/sysemu/misc_helper.c |  3 +++
 target/i386/tcg/sysemu/svm_helper.c  |  9 +++++---
 3 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 0b3057bdb6..a596e967f7 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -240,6 +240,7 @@ typedef enum X86Seg {
 #define CR4_OSFXSR_SHIFT 9
 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
 #define CR4_OSXMMEXCPT_MASK  (1U << 10)
+#define CR4_UMIP_MASK   (1U << 11)
 #define CR4_LA57_MASK   (1U << 12)
 #define CR4_VMXE_MASK   (1U << 13)
 #define CR4_SMXE_MASK   (1U << 14)
@@ -251,6 +252,36 @@ typedef enum X86Seg {
 #define CR4_PKE_MASK   (1U << 22)
 #define CR4_PKS_MASK   (1U << 24)
 
+#define CR4_RESERVED_MASK \
+(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
+                | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
+                | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
+                | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
+                | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
+                | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK))
+
+#define cr4_reserved_bits(env) \
+({ \
+    uint64_t __reserved_bits = CR4_RESERVED_MASK; \
+    if (!env->features[FEAT_XSAVE]) \
+        __reserved_bits |= CR4_OSXSAVE_MASK; \
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) \
+        __reserved_bits |= CR4_SMEP_MASK; \
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) \
+        __reserved_bits |= CR4_SMAP_MASK; \
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) \
+        __reserved_bits |= CR4_FSGSBASE_MASK; \
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) \
+        __reserved_bits |= CR4_PKE_MASK; \
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) \
+        __reserved_bits |= CR4_LA57_MASK; \
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) \
+        __reserved_bits |= CR4_UMIP_MASK; \
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) \
+        __reserved_bits |= CR4_PKS_MASK; \
+    __reserved_bits; \
+})
+
 #define DR6_BD          (1 << 13)
 #define DR6_BS          (1 << 14)
 #define DR6_BT          (1 << 15)
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
index db0d8a9d79..a2af2c9bba 100644
--- a/target/i386/tcg/sysemu/misc_helper.c
+++ b/target/i386/tcg/sysemu/misc_helper.c
@@ -99,6 +99,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
         cpu_x86_update_cr3(env, t0);
         break;
     case 4:
+        if (t0 & cr4_reserved_bits(env)) {
+            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+        }
         if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
             (env->hflags & HF_CS64_MASK)) {
             raise_exception_ra(env, EXCP0D_GPF, GETPC());
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index b6df36d4e5..37dbe8e434 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -111,6 +111,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     uint32_t int_ctl;
     uint32_t asid;
     uint64_t new_cr0;
+    uint64_t new_cr4;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
 
@@ -251,14 +252,16 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
         cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
     }
+    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
+    if (new_cr4 & cr4_reserved_bits(env)) {
+        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+    }
     /* clear exit_info_2 so we behave like the real hardware */
     x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
 
     cpu_x86_update_cr0(env, new_cr0);
-    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
-                                     env->vm_vmcb + offsetof(struct vmcb,
-                                                             save.cr4)));
+    cpu_x86_update_cr4(env, new_cr4);
     cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr3)));
-- 
2.25.1
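
As a worked example of the new check, a minimal standalone sketch:
with a CPU model that does not advertise PKS, CR4_PKS_MASK lands in
the reserved mask, so a VMRUN whose save.cr4 sets that bit fails.
The bit positions are architectural; the CPUID plumbing is stubbed
out here.

    #include <stdint.h>
    #include <stdio.h>

    #define CR4_PAE_MASK (1u << 5)
    #define CR4_PKS_MASK (1u << 24)

    int main(void)
    {
        /* Stub of cr4_reserved_bits() for a CPU without PKS (the real
         * mask also contains every always-reserved CR4 bit). */
        uint64_t reserved_bits = CR4_PKS_MASK;

        uint64_t new_cr4 = CR4_PAE_MASK | CR4_PKS_MASK; /* VMCB save.cr4 */
        if (new_cr4 & reserved_bits) {
            /* helper_vmrun calls cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()) */
            puts("VMRUN fails: reserved CR4 bit set");
        }
        return 0;
    }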




* [PATCH v2] target/i386: Added consistency checks for EFER
@ 2021-07-21 15:26 Lara Lazier
From: Lara Lazier @ 2021-07-21 15:26 UTC
  To: qemu-devel; +Cc: Lara Lazier

EFER.SVME has to be set, and EFER reserved bits must
be zero.
In addition, the combinations
 * EFER.LMA or EFER.LME is non-zero and the processor does not support LM
 * non-zero EFER.LME and CR0.PG and zero CR4.PAE
 * non-zero EFER.LME and CR0.PG and zero CR0.PE
 * non-zero EFER.LME, CR0.PG, CR4.PAE, CS.L and CS.D
are all invalid.
(AMD64 Architecture Programmer's Manual, V2, 15.5)

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/cpu.h                   |  5 ++++
 target/i386/tcg/sysemu/svm_helper.c | 40 +++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 5d98a4e7c0..0b3057bdb6 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -466,6 +466,11 @@ typedef enum X86Seg {
 #define MSR_EFER_SVME  (1 << 12)
 #define MSR_EFER_FFXSR (1 << 14)
 
+#define MSR_EFER_RESERVED\
+        (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\
+            | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\
+            | MSR_EFER_FFXSR))
+
 #define MSR_STAR                        0xc0000081
 #define MSR_LSTAR                       0xc0000082
 #define MSR_CSTAR                       0xc0000083
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 00618cff23..b6df36d4e5 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -65,6 +65,42 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                            sc->base, sc->limit, sc->flags);
 }
 
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+    if (!(env->efer & MSR_EFER_SVME)) {
+        return true;
+    }
+
+    if (env->efer & MSR_EFER_RESERVED) {
+        return true;
+    }
+
+    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && !(env->cr[4] & CR4_PAE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && !(env->cr[0] & CR0_PE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && (env->cr[4] & CR4_PAE_MASK)
+                                && (env->segs[R_CS].flags & DESC_L_MASK)
+                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
+        return true;
+    }
+
+    return false;
+}
+
+
 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
 {
     CPUState *cs = env_cpu(env);
@@ -278,6 +314,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     }
 #endif
 
+    if (is_efer_invalid_state(env)) {
+        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+    }
+
     switch (x86_ldub_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
     case TLB_CONTROL_DO_NOTHING:
-- 
2.25.1
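
A minimal standalone sketch of the second invalid combination above
(non-zero EFER.LME and CR0.PG with zero CR4.PAE); the struct is a
stand-in for the relevant CPUX86State fields and the bit positions
are architectural:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EFER_LME (1u << 8)
    #define CR0_PG_MASK  (1u << 31)
    #define CR4_PAE_MASK (1u << 5)

    struct stub_state { uint64_t efer, cr0, cr4; };

    /* One predicate out of is_efer_invalid_state(), in isolation. */
    static bool lme_pg_without_pae(const struct stub_state *s)
    {
        return (s->efer & MSR_EFER_LME) && (s->cr0 & CR0_PG_MASK) &&
               !(s->cr4 & CR4_PAE_MASK);
    }

    int main(void)
    {
        /* Long mode enabled and paging on, but PAE clear: invalid. */
        struct stub_state s = { MSR_EFER_LME, CR0_PG_MASK, 0 };
        if (lme_pg_without_pae(&s)) {
            puts("invalid guest state: VMRUN exits with SVM_EXIT_ERR");
        }
        return 0;
    }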




* Re: [PATCH v2] target/i386: Added consistency checks for EFER
@ 2021-07-21 16:53 Paolo Bonzini
From: Paolo Bonzini @ 2021-07-21 16:53 UTC
  To: Lara Lazier, qemu-devel

On 21/07/21 17:26, Lara Lazier wrote:
> EFER.SVME has to be set, and EFER reserved bits must
> be zero.
> In addition, the combinations
>   * EFER.LMA or EFER.LME is non-zero and the processor does not support LM
>   * non-zero EFER.LME and CR0.PG and zero CR4.PAE
>   * non-zero EFER.LME and CR0.PG and zero CR0.PE
>   * non-zero EFER.LME, CR0.PG, CR4.PAE, CS.L and CS.D
> are all invalid.
> (AMD64 Architecture Programmer's Manual, V2, 15.5)
> 
> Signed-off-by: Lara Lazier <laramglazier@gmail.com>
> ---
>   target/i386/cpu.h                   |  5 ++++
>   target/i386/tcg/sysemu/svm_helper.c | 40 +++++++++++++++++++++++++++++
>   2 files changed, 45 insertions(+)
> 
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index 5d98a4e7c0..0b3057bdb6 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -466,6 +466,11 @@ typedef enum X86Seg {
>   #define MSR_EFER_SVME  (1 << 12)
>   #define MSR_EFER_FFXSR (1 << 14)
>   
> +#define MSR_EFER_RESERVED\
> +        (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\
> +            | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\
> +            | MSR_EFER_FFXSR))
> +
>   #define MSR_STAR                        0xc0000081
>   #define MSR_LSTAR                       0xc0000082
>   #define MSR_CSTAR                       0xc0000083
> diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
> index 00618cff23..b6df36d4e5 100644
> --- a/target/i386/tcg/sysemu/svm_helper.c
> +++ b/target/i386/tcg/sysemu/svm_helper.c
> @@ -65,6 +65,42 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
>                              sc->base, sc->limit, sc->flags);
>   }
>   
> +static inline bool is_efer_invalid_state(CPUX86State *env)
> +{
> +    if (!(env->efer & MSR_EFER_SVME)) {
> +        return true;
> +    }
> +
> +    if (env->efer & MSR_EFER_RESERVED) {
> +        return true;
> +    }
> +
> +    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
> +            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
> +        return true;
> +    }
> +
> +    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
> +                                && !(env->cr[4] & CR4_PAE_MASK)) {
> +        return true;
> +    }
> +
> +    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
> +                                && !(env->cr[0] & CR0_PE_MASK)) {
> +        return true;
> +    }
> +
> +    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
> +                                && (env->cr[4] & CR4_PAE_MASK)
> +                                && (env->segs[R_CS].flags & DESC_L_MASK)
> +                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
> +        return true;
> +    }
> +
> +    return false;
> +}
> +
> +
>   void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
>   {
>       CPUState *cs = env_cpu(env);
> @@ -278,6 +314,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
>       }
>   #endif
>   
> +    if (is_efer_invalid_state(env)) {
> +        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
> +    }
> +
>       switch (x86_ldub_phys(cs,
>                         env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
>       case TLB_CONTROL_DO_NOTHING:
> 

Queued all, thanks.  However, I modified the CR4 one to use a static
inline function instead of a macro (the KVM code you based it on reuses
the code for both the host and the guest CPUID, but this is not the case
in QEMU).

Paolo
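
For reference, a sketch of the macro body recast as a static inline,
following the checks in the patch as posted (the committed helper may
differ in detail):

    static inline uint64_t cr4_reserved_bits(CPUX86State *env)
    {
        uint64_t reserved_bits = CR4_RESERVED_MASK;

        if (!env->features[FEAT_XSAVE]) {
            reserved_bits |= CR4_OSXSAVE_MASK;
        }
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
            reserved_bits |= CR4_SMEP_MASK;
        }
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
            reserved_bits |= CR4_SMAP_MASK;
        }
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
            reserved_bits |= CR4_FSGSBASE_MASK;
        }
        if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
            reserved_bits |= CR4_PKE_MASK;
        }
        if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
            reserved_bits |= CR4_LA57_MASK;
        }
        if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
            reserved_bits |= CR4_UMIP_MASK;
        }
        if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
            reserved_bits |= CR4_PKS_MASK;
        }
        return reserved_bits;
    }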




* Re: [PATCH] target/i386: Added V_INTR_PRIO check to virtual interrupts
@ 2021-07-28  9:26 Paolo Bonzini
From: Paolo Bonzini @ 2021-07-28  9:26 UTC
  To: Lara Lazier, qemu-devel

On 21/07/21 17:26, Lara Lazier wrote:
> +static inline bool ctl_has_irq(uint32_t int_ctl)
> +{
> +    uint32_t int_prio;
> +    uint32_t tpr;
> +
> +    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_MASKING_SHIFT;

Oops, I missed that this should be V_INTR_PRIO_SHIFT.  Can you send the
correction, please?

Thanks,

Paolo

> +    tpr = int_ctl & V_TPR_MASK;
> +    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
> +}
> +
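
A sketch of the helper with the shift corrected as requested
(V_INTR_PRIO_SHIFT in place of V_INTR_MASKING_SHIFT, everything else
as in the patch):

    static inline bool ctl_has_irq(uint32_t int_ctl)
    {
        uint32_t int_prio;
        uint32_t tpr;

        /* V_INTR_PRIO occupies bits 19:16 of int_ctl. */
        int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
        tpr = int_ctl & V_TPR_MASK;
        return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
    }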




Thread overview: 5 messages
2021-07-21 15:26 [PATCH] target/i386: Added V_INTR_PRIO check to virtual interrupts Lara Lazier
2021-07-21 15:26 ` [PATCH v2] target/i386: Added consistency checks for CR4 Lara Lazier
2021-07-21 15:26 ` [PATCH v2] target/i386: Added consistency checks for EFER Lara Lazier
2021-07-21 16:53   ` Paolo Bonzini
2021-07-28  9:26 ` [PATCH] target/i386: Added V_INTR_PRIO check to virtual interrupts Paolo Bonzini
