kvmarm.lists.cs.columbia.edu archive mirror
 help / color / mirror / Atom feed
* [PATCH] KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
@ 2023-03-29 12:15 Fuad Tabba
  2023-03-29 13:24 ` Marc Zyngier
  0 siblings, 1 reply; 3+ messages in thread
From: Fuad Tabba @ 2023-03-29 12:15 UTC (permalink / raw)
  To: kvmarm
  Cc: maz, oupton, will, pbonzini, james.morse, alexandru.elisei,
	suzuki.poulose, reijiw, ricarkol, rananta, jingzhangos, tabba

The existing pKVM code attempts to advertise CSV2/3 using a value
that's initialized to 0 but never set. To advertise csv2/3 to a
protected guest, store the values at the hypervisor and use them
when setting csv2/3.

Similar to non-protected KVM, these are tracked as a system-wide
variable, rather than per cpu, for simplicity.

Fixes: 6c30bfb18d0b ("KVM: arm64: Add handlers for protected VM System Registers")
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_hyp.h   |  3 +++
 arch/arm64/kvm/arm.c               |  2 ++
 arch/arm64/kvm/hyp/nvhe/sys_regs.c | 15 ++++++++++++---
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index bdd9cf546d95..723a645af191 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -127,4 +127,7 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
 extern unsigned long kvm_nvhe_sym(__icache_flags);
 extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
 
+extern bool kvm_nvhe_sym(spectre_unaffected);
+extern bool kvm_nvhe_sym(meltdown_unaffected);
+
 #endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3bd732eaf087..364a4440ae54 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1902,6 +1902,8 @@ static void kvm_hyp_init_symbols(void)
 	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
 	kvm_nvhe_sym(__icache_flags) = __icache_flags;
 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+	kvm_nvhe_sym(spectre_unaffected) = (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	kvm_nvhe_sym(meltdown_unaffected) = (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
 }
 
 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 08d2b004f4b7..3f647a2f4c96 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -28,6 +28,16 @@ u64 id_aa64mmfr1_el1_sys_val;
 u64 id_aa64mmfr2_el1_sys_val;
 u64 id_aa64smfr0_el1_sys_val;
 
+/*
+ * Track whether the system isn't affected by spectre/meltdown.
+ * Although this is per-CPU, we make it global for simplicity, e.g., not to have
+ * to worry about migration.
+ *
+ * Unlike for non-protected VMs, userspace cannot override this.
+ */
+bool spectre_unaffected;
+bool meltdown_unaffected;
+
 /*
  * Inject an unknown/undefined exception to an AArch64 guest while most of its
  * sysregs are live.
@@ -85,7 +95,6 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
@@ -94,9 +103,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 
 	/* Spectre and Meltdown mitigation in KVM */
 	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
+			       spectre_unaffected);
 	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
+			       meltdown_unaffected);
 
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
-- 
2.40.0.348.gf938b09366-goog


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
  2023-03-29 12:15 [PATCH] KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs Fuad Tabba
@ 2023-03-29 13:24 ` Marc Zyngier
  2023-03-30  9:19   ` Fuad Tabba
  0 siblings, 1 reply; 3+ messages in thread
From: Marc Zyngier @ 2023-03-29 13:24 UTC (permalink / raw)
  To: Fuad Tabba
  Cc: kvmarm, oupton, will, pbonzini, james.morse, alexandru.elisei,
	suzuki.poulose, reijiw, ricarkol, rananta, jingzhangos

On Wed, 29 Mar 2023 13:15:26 +0100,
Fuad Tabba <tabba@google.com> wrote:
> 
> The existing pKVM code attempts to do that using a value that's
> initialized to 0 but never set. To advertise csv2/3 to a
> protected guest, store them at the hypervisor and use them for
> setting csv2/3.
> 
> Similar to non-protected KVM, these are tracked as a system-wide
> variable, rather than per cpu, for simplicity.
> 
> Fixes: 6c30bfb18d0b ("KVM: arm64: Add handlers for protected VM System Registers")
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
>  arch/arm64/include/asm/kvm_hyp.h   |  3 +++
>  arch/arm64/kvm/arm.c               |  2 ++
>  arch/arm64/kvm/hyp/nvhe/sys_regs.c | 15 ++++++++++++---
>  3 files changed, 17 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index bdd9cf546d95..723a645af191 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -127,4 +127,7 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
>  extern unsigned long kvm_nvhe_sym(__icache_flags);
>  extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
>  
> +extern bool kvm_nvhe_sym(spectre_unaffected);
> +extern bool kvm_nvhe_sym(meltdown_unaffected);
> +
>  #endif /* __ARM64_KVM_HYP_H__ */
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 3bd732eaf087..364a4440ae54 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1902,6 +1902,8 @@ static void kvm_hyp_init_symbols(void)
>  	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
>  	kvm_nvhe_sym(__icache_flags) = __icache_flags;
>  	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
> +	kvm_nvhe_sym(spectre_unaffected) = (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
> +	kvm_nvhe_sym(meltdown_unaffected) = (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
>  }
>  
>  static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
> diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> index 08d2b004f4b7..3f647a2f4c96 100644
> --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> @@ -28,6 +28,16 @@ u64 id_aa64mmfr1_el1_sys_val;
>  u64 id_aa64mmfr2_el1_sys_val;
>  u64 id_aa64smfr0_el1_sys_val;
>  
> +/*
> + * Track whether the system isn't affected by spectre/meltdown.
> + * Although this is per-CPU, we make it global for simplicity, e.g., not to have
> + * to worry about migration.
> + *
> + * Unlike for non-protected VMs, userspace cannot override this.
> + */
> +bool spectre_unaffected;
> +bool meltdown_unaffected;
> +
>  /*
>   * Inject an unknown/undefined exception to an AArch64 guest while most of its
>   * sysregs are live.
> @@ -85,7 +95,6 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
>  
>  static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
>  {
> -	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
>  	u64 set_mask = 0;
>  	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
>  
> @@ -94,9 +103,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
>  
>  	/* Spectre and Meltdown mitigation in KVM */
>  	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
> -			       (u64)kvm->arch.pfr0_csv2);
> +			       spectre_unaffected);
>  	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
> -			       (u64)kvm->arch.pfr0_csv3);
> +			       meltdown_unaffected);

Since you already have a sanitised version of ID_AA64PFR0_EL1, it
would seem more straightforward to directly perform this adjustment at
the point where you export the value, rather than doing it at runtime.

Ideally, the proton-pack code would perform the update and set the
sanitised values directly in the cpufeature repository, but this would
involve me looking at it again, and I really don't want to do that.

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
  2023-03-29 13:24 ` Marc Zyngier
@ 2023-03-30  9:19   ` Fuad Tabba
  0 siblings, 0 replies; 3+ messages in thread
From: Fuad Tabba @ 2023-03-30  9:19 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: kvmarm, oupton, will, pbonzini, james.morse, alexandru.elisei,
	suzuki.poulose, reijiw, ricarkol, rananta, jingzhangos

Hi Marc,

On Wed, Mar 29, 2023 at 2:24 PM Marc Zyngier <maz@kernel.org> wrote:
>
> On Wed, 29 Mar 2023 13:15:26 +0100,
> Fuad Tabba <tabba@google.com> wrote:
> >
> > The existing pKVM code attempts to do that using a value that's
> > initialized to 0 but never set. To advertise csv2/3 to a
> > protected guest, store them at the hypervisor and use them for
> > setting csv2/3.
> >
> > Similar to non-protected KVM, these are tracked as a system-wide
> > variable, rather than per cpu, for simplicity.
> >
> > Fixes: 6c30bfb18d0b ("KVM: arm64: Add handlers for protected VM System Registers")
> > Signed-off-by: Fuad Tabba <tabba@google.com>
> > ---
> >  arch/arm64/include/asm/kvm_hyp.h   |  3 +++
> >  arch/arm64/kvm/arm.c               |  2 ++
> >  arch/arm64/kvm/hyp/nvhe/sys_regs.c | 15 ++++++++++++---
> >  3 files changed, 17 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> > index bdd9cf546d95..723a645af191 100644
> > --- a/arch/arm64/include/asm/kvm_hyp.h
> > +++ b/arch/arm64/include/asm/kvm_hyp.h
> > @@ -127,4 +127,7 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
> >  extern unsigned long kvm_nvhe_sym(__icache_flags);
> >  extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
> >
> > +extern bool kvm_nvhe_sym(spectre_unaffected);
> > +extern bool kvm_nvhe_sym(meltdown_unaffected);
> > +
> >  #endif /* __ARM64_KVM_HYP_H__ */
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 3bd732eaf087..364a4440ae54 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -1902,6 +1902,8 @@ static void kvm_hyp_init_symbols(void)
> >       kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
> >       kvm_nvhe_sym(__icache_flags) = __icache_flags;
> >       kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
> > +     kvm_nvhe_sym(spectre_unaffected) = (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
> > +     kvm_nvhe_sym(meltdown_unaffected) = (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
> >  }
> >
> >  static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
> > diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > index 08d2b004f4b7..3f647a2f4c96 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > @@ -28,6 +28,16 @@ u64 id_aa64mmfr1_el1_sys_val;
> >  u64 id_aa64mmfr2_el1_sys_val;
> >  u64 id_aa64smfr0_el1_sys_val;
> >
> > +/*
> > + * Track whether the system isn't affected by spectre/meltdown.
> > + * Although this is per-CPU, we make it global for simplicity, e.g., not to have
> > + * to worry about migration.
> > + *
> > + * Unlike for non-protected VMs, userspace cannot override this.
> > + */
> > +bool spectre_unaffected;
> > +bool meltdown_unaffected;
> > +
> >  /*
> >   * Inject an unknown/undefined exception to an AArch64 guest while most of its
> >   * sysregs are live.
> > @@ -85,7 +95,6 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
> >
> >  static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
> >  {
> > -     const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
> >       u64 set_mask = 0;
> >       u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
> >
> > @@ -94,9 +103,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
> >
> >       /* Spectre and Meltdown mitigation in KVM */
> >       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
> > -                            (u64)kvm->arch.pfr0_csv2);
> > +                            spectre_unaffected);
> >       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
> > -                            (u64)kvm->arch.pfr0_csv3);
> > +                            meltdown_unaffected);
>
> Since you already have a sanitised version of ID_AA64PFR0_EL1, it
> would seem more straightforward to directly perform this adjustment at
> the point where you export the value, rather than doing it at runtime.

That makes more sense, and results in cleaner code. I'll do that.
>
> Ideally, the proton-pack code would perform the update and set the
> sanitised values directly in the cpufeature repository, but this would
> involve me looking at it again, and I really don't want to do that.

You won't have to :) I'll post v2 soon.

Thanks!
/fuad

> Thanks,
>
>         M.
>
> --
> Without deviation from the norm, progress is not possible.

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2023-03-30  9:20 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-03-29 12:15 [PATCH] KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs Fuad Tabba
2023-03-29 13:24 ` Marc Zyngier
2023-03-30  9:19   ` Fuad Tabba

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).