From: Marc Zyngier <maz@kernel.org>
To: Jing Zhang <jingzhangos@google.com>
Cc: KVM <kvm@vger.kernel.org>, KVMARM <kvmarm@lists.linux.dev>,
	ARMLinux <linux-arm-kernel@lists.infradead.org>,
	Oliver Upton <oupton@google.com>, Will Deacon <will@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Fuad Tabba <tabba@google.com>, Reiji Watanabe <reijiw@google.com>,
	Raghavendra Rao Ananta <rananta@google.com>
Subject: Re: [PATCH v10 3/5] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer
Date: Sun, 28 May 2023 11:52:59 +0100
Message-ID: <87r0r0ohh0.wl-maz@kernel.org>
In-Reply-To: <20230522221835.957419-4-jingzhangos@google.com>

On Mon, 22 May 2023 23:18:33 +0100,
Jing Zhang <jingzhangos@google.com> wrote:
> 
> With per guest ID registers, PMUver settings from userspace
> can be stored in its corresponding ID register.
> 
> No functional change intended.
> 
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |  12 ++--
>  arch/arm64/kvm/arm.c              |   6 --
>  arch/arm64/kvm/sys_regs.c         | 100 ++++++++++++++++++++++++------
>  include/kvm/arm_pmu.h             |   5 +-
>  4 files changed, 92 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 8a2fde6c04c4..7b0f43373dbe 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -246,6 +246,13 @@ struct kvm_arch {
>  #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		7
>  	/* SMCCC filter initialized for the VM */
>  #define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		8
> +	/*
> +	 * ID_AA64DFR0_EL1.PMUVer was set to ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> +	 * or ID_DFR0_EL1.PerfMon was set to ID_DFR0_EL1_PerfMon_IMPDEF from
> +	 * userspace for VCPUs without a PMU.
> +	 */
> +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU		9
> +
>  	unsigned long flags;
>  
>  	/*
> @@ -257,11 +264,6 @@ struct kvm_arch {
>  
>  	cpumask_var_t supported_cpus;
>  
> -	struct {
> -		u8 imp:4;
> -		u8 unimp:4;
> -	} dfr0_pmuver;
> -
>  	/* Hypercall features firmware registers' descriptor */
>  	struct kvm_smccc_features smccc_feat;
>  	struct maple_tree smccc_filter;
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 5114521ace60..ca18c09ccf82 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -148,12 +148,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	kvm_arm_init_hypercalls(kvm);
>  	kvm_arm_init_id_regs(kvm);
>  
> -	/*
> -	 * Initialise the default PMUver before there is a chance to
> -	 * create an actual PMU.
> -	 */
> -	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> -
>  	return 0;
>  
>  err_free_cpumask:
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 9fb1c2f8f5a5..84d9e4baa4f8 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1178,9 +1178,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
>  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
>  {
>  	if (kvm_vcpu_has_pmu(vcpu))
> -		return vcpu->kvm->arch.dfr0_pmuver.imp;
> +		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> +				 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> +	else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> +		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
>  
> -	return vcpu->kvm->arch.dfr0_pmuver.unimp;
> +	return 0;
>  }
>  
>  static u8 perfmon_to_pmuver(u8 perfmon)
> @@ -1403,8 +1406,12 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>  			       const struct sys_reg_desc *rd,
>  			       u64 val)
>  {
> +	struct kvm_arch *arch = &vcpu->kvm->arch;
> +	u64 old_val = read_id_reg(vcpu, rd);
>  	u8 pmuver, host_pmuver;
> +	u64 new_val = val;
>  	bool valid_pmu;
> +	int ret = 0;
>  
>  	host_pmuver = kvm_arm_pmu_get_pmuver_limit();
>  
> @@ -1424,26 +1431,51 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>  	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
>  		return -EINVAL;
>  
> +	mutex_lock(&arch->config_lock);
>  	/* We can only differ with PMUver, and anything else is an error */
> -	val ^= read_id_reg(vcpu, rd);
> +	val ^= old_val;
>  	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> -	if (val)
> -		return -EINVAL;
> +	if (val) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
>  
> -	if (valid_pmu)
> -		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> -	else
> -		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> +	/* Only allow userspace to change the idregs before the VM has run */
> +	if (kvm_vm_has_ran_once(vcpu->kvm)) {
> +		if (new_val != old_val)
> +			ret = -EBUSY;
> +	} else {
> +		if (valid_pmu) {
> +			val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> +			val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> +			val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
> +			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> +
> +			val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> +			val &= ~ID_DFR0_EL1_PerfMon_MASK;
> +			val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
> +			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> +		} else {
> +			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> +				   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> +		}
> +	}
>  
> -	return 0;
> +out:
> +	mutex_unlock(&arch->config_lock);
> +	return ret;
>  }
>  
>  static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>  			   const struct sys_reg_desc *rd,
>  			   u64 val)
>  {
> +	struct kvm_arch *arch = &vcpu->kvm->arch;
> +	u64 old_val = read_id_reg(vcpu, rd);
>  	u8 perfmon, host_perfmon;
> +	u64 new_val = val;
>  	bool valid_pmu;
> +	int ret = 0;
>  
>  	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
>  
> @@ -1464,18 +1496,39 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>  	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
>  		return -EINVAL;
>  
> +	mutex_lock(&arch->config_lock);
>  	/* We can only differ with PerfMon, and anything else is an error */
> -	val ^= read_id_reg(vcpu, rd);
> +	val ^= old_val;
>  	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> -	if (val)
> -		return -EINVAL;
> +	if (val) {
> +		ret = -EINVAL;
> +		goto out;
> +	}
>  
> -	if (valid_pmu)
> -		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> -	else
> -		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> +	/* Only allow userspace to change the idregs before the VM has run */
> +	if (kvm_vm_has_ran_once(vcpu->kvm)) {
> +		if (new_val != old_val)
> +			ret = -EBUSY;
> +	} else {
> +		if (valid_pmu) {
> +			val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> +			val &= ~ID_DFR0_EL1_PerfMon_MASK;
> +			val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> +			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> +
> +			val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> +			val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> +			val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, perfmon_to_pmuver(perfmon));
> +			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> +		} else {
> +			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> +				   perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> +		}
> +	}

This is the exact same code as for aa64dfr0. Make it a helper, please.
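
Something like this, perhaps (completely untested; the helper name is
only illustrative, and it assumes both callers already hold the
config_lock):

static void pmuver_update(struct kvm_vcpu *vcpu, u8 pmuver, bool valid_pmu)
{
	u64 val;

	if (!valid_pmu) {
		/*
		 * Remember an IMPDEF PMU version request made from
		 * userspace for VCPUs without a PMU.
		 */
		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU,
			   &vcpu->kvm->arch.flags,
			   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
		return;
	}

	/* Keep the 64bit and 32bit views of the PMU version in sync */
	val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
	IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;

	val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
			  pmuver_to_perfmon(pmuver));
	IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
}

set_id_dfr0_el1() can then pass perfmon_to_pmuver(perfmon) as the
pmuver argument; since perfmon_to_pmuver() maps PerfMon_IMPDEF onto
PMUVer_IMP_DEF, the IMPDEF check above keeps working for the 32bit
accessor as well.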

>  
> -	return 0;
> +out:
> +	mutex_unlock(&arch->config_lock);
> +	return ret;
>  }
>  
>  /*
> @@ -3422,6 +3475,17 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
>  	}
>  
>  	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> +	/*
> +	 * Initialise the default PMUver before there is a chance to
> +	 * create an actual PMU.
> +	 */
> +	val = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
> +
> +	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> +	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> +			  kvm_arm_pmu_get_pmuver_limit());
> +
> +	IDREG(kvm, SYS_ID_AA64DFR0_EL1) = val;
>  }
>  
>  int __init kvm_sys_reg_table_init(void)
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 1a6a695ca67a..8d70dbdc1e0a 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
>  /*
>   * Evaluates as true when emulating PMUv3p5, and false otherwise.
>   */
> -#define kvm_pmu_is_3p5(vcpu)						\
> -	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> +#define kvm_pmu_is_3p5(vcpu)									\
> +	 (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),					\
> +		    IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)

This is getting unreadable. How about something like:

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 8d70dbdc1e0a..ecb55d87fa36 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -92,9 +92,13 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 /*
  * Evaluates as true when emulating PMUv3p5, and false otherwise.
  */
-#define kvm_pmu_is_3p5(vcpu)									\
-	 (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),					\
-		    IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu)	({					\
+	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); 		\
+	u8 v;								\
+									\
+	v = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);	\
+	v >= ID_AA64DFR0_EL1_PMUVer_V3P5;				\
+})
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
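
The ({ ... }) form is a GNU statement expression: the block evaluates
to its last statement, so the macro still yields the comparison result
while getting to use named temporaries, which reads far better than
one giant nested expression.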
 
Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.
