From: Marc Zyngier <maz@kernel.org>
To: Shaoqin Huang <shahuang@redhat.com>,
	Raghavendra Rao Ananta <rananta@google.com>
Cc: Oliver Upton <oliver.upton@linux.dev>,
	kvmarm@lists.linux.dev, James Morse <james.morse@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Zenghui Yu <yuzenghui@huawei.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Shuah Khan <shuah@kernel.org>,
	linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org,
	linux-kselftest@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/
Date: Tue, 28 Nov 2023 08:43:18 +0000	[thread overview]
Message-ID: <ae81aa3f6527b663ef73b64a3fb72e5b@kernel.org> (raw)
In-Reply-To: <CAJHc60wsEjjLmAVUrb3n9Tyftqi7UXWh7V1hE1E90bUXiUk+Tw@mail.gmail.com>

On 2023-11-27 21:48, Raghavendra Rao Ananta wrote:
> Hi Shaoqin,
> 
> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <shahuang@redhat.com> 
> wrote:
>> 
>> Move those pmu helper functions into lib/, so they can be used by other
>> pmu tests.
>> 
>> Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
>> ---
>>  .../kvm/aarch64/vpmu_counter_access.c         | 118 -----------------
>>  .../selftests/kvm/include/aarch64/vpmu.h      | 119 ++++++++++++++++++
>>  2 files changed, 119 insertions(+), 118 deletions(-)
>> 
>> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> index 17305408a334..62d6315790ab 100644
>> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> @@ -20,12 +20,6 @@
>>  #include <perf/arm_pmuv3.h>
>>  #include <linux/bitfield.h>
>> 
>> -/* The max number of the PMU event counters (excluding the cycle counter) */
>> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> -
>> -/* The cycle counter bit position that's common among the PMU registers */
>> -#define ARMV8_PMU_CYCLE_IDX            31
>> -
>>  static struct vpmu_vm *vpmu_vm;
>> 
>>  struct pmreg_sets {
>> @@ -35,118 +29,6 @@ struct pmreg_sets {
>> 
>>  #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>> 
>> -static uint64_t get_pmcr_n(uint64_t pmcr)
>> -{
>> -       return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>> -}
>> -
>> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> -{
>> -       *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
>> -       *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> -}
>> -
>> -static uint64_t get_counters_mask(uint64_t n)
>> -{
>> -       uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> -
>> -       if (n)
>> -               mask |= GENMASK(n - 1, 0);
>> -       return mask;
>> -}
>> -
>> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline unsigned long read_sel_evcntr(int sel)
>> -{
>> -       write_sysreg(sel, pmselr_el0);
>> -       isb();
>> -       return read_sysreg(pmxevcntr_el0);
>> -}
>> -
>> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline void write_sel_evcntr(int sel, unsigned long val)
>> -{
>> -       write_sysreg(sel, pmselr_el0);
>> -       isb();
>> -       write_sysreg(val, pmxevcntr_el0);
>> -       isb();
>> -}
>> -
>> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline unsigned long read_sel_evtyper(int sel)
>> -{
>> -       write_sysreg(sel, pmselr_el0);
>> -       isb();
>> -       return read_sysreg(pmxevtyper_el0);
>> -}
>> -
>> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline void write_sel_evtyper(int sel, unsigned long val)
>> -{
>> -       write_sysreg(sel, pmselr_el0);
>> -       isb();
>> -       write_sysreg(val, pmxevtyper_el0);
>> -       isb();
>> -}
>> -
>> -static inline void enable_counter(int idx)
>> -{
>> -       uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> -       write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> -       isb();
>> -}
>> -
>> -static inline void disable_counter(int idx)
>> -{
>> -       uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> -       write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> -       isb();
>> -}
>> -
>> -static void pmu_disable_reset(void)
>> -{
>> -       uint64_t pmcr = read_sysreg(pmcr_el0);
>> -
>> -       /* Reset all counters, disabling them */
>> -       pmcr &= ~ARMV8_PMU_PMCR_E;
>> -       write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
>> -       isb();
>> -}
>> -
>> -#define RETURN_READ_PMEVCNTRN(n) \
>> -       return read_sysreg(pmevcntr##n##_el0)
>> -static unsigned long read_pmevcntrn(int n)
>> -{
>> -       PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
>> -       return 0;
>> -}
>> -
>> -#define WRITE_PMEVCNTRN(n) \
>> -       write_sysreg(val, pmevcntr##n##_el0)
>> -static void write_pmevcntrn(int n, unsigned long val)
>> -{
>> -       PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
>> -       isb();
>> -}
>> -
>> -#define READ_PMEVTYPERN(n) \
>> -       return read_sysreg(pmevtyper##n##_el0)
>> -static unsigned long read_pmevtypern(int n)
>> -{
>> -       PMEVN_SWITCH(n, READ_PMEVTYPERN);
>> -       return 0;
>> -}
>> -
>> -#define WRITE_PMEVTYPERN(n) \
>> -       write_sysreg(val, pmevtyper##n##_el0)
>> -static void write_pmevtypern(int n, unsigned long val)
>> -{
>> -       PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
>> -       isb();
>> -}
>> -
>>  /*
>>   * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
>>   * accessors that test cases will use. Each of the accessors will
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> index 0a56183644ee..e0cc1ca1c4b7 100644
>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -1,10 +1,17 @@
>>  /* SPDX-License-Identifier: GPL-2.0 */
>> 
>>  #include <kvm_util.h>
>> +#include <perf/arm_pmuv3.h>
>> 
>>  #define GICD_BASE_GPA  0x8000000ULL
>>  #define GICR_BASE_GPA  0x80A0000ULL
>> 
>> +/* The max number of the PMU event counters (excluding the cycle counter) */
>> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> +
>> +/* The cycle counter bit position that's common among the PMU registers */
>> +#define ARMV8_PMU_CYCLE_IDX            31
>> +
>>  struct vpmu_vm {
>>         struct kvm_vm *vm;
>>         struct kvm_vcpu *vcpu;
>> @@ -14,3 +21,115 @@ struct vpmu_vm {
>>  struct vpmu_vm *create_vpmu_vm(void *guest_code);
>> 
>>  void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> +
>> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
>> +{
>> +       return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>> +}
>> +
>> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> +{
>> +       *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
>> +       *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> +}
>> +
>> +static inline uint64_t get_counters_mask(uint64_t n)
>> +{
>> +       uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> +
>> +       if (n)
>> +               mask |= GENMASK(n - 1, 0);
>> +       return mask;
>> +}
>> +
>> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline unsigned long read_sel_evcntr(int sel)
>> +{
>> +       write_sysreg(sel, pmselr_el0);
>> +       isb();
>> +       return read_sysreg(pmxevcntr_el0);
>> +}
>> +
>> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline void write_sel_evcntr(int sel, unsigned long val)
>> +{
>> +       write_sysreg(sel, pmselr_el0);
>> +       isb();
>> +       write_sysreg(val, pmxevcntr_el0);
>> +       isb();
>> +}
>> +
>> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline unsigned long read_sel_evtyper(int sel)
>> +{
>> +       write_sysreg(sel, pmselr_el0);
>> +       isb();
>> +       return read_sysreg(pmxevtyper_el0);
>> +}
>> +
>> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline void write_sel_evtyper(int sel, unsigned long val)
>> +{
>> +       write_sysreg(sel, pmselr_el0);
>> +       isb();
>> +       write_sysreg(val, pmxevtyper_el0);
>> +       isb();
>> +}
>> +
>> +static inline void enable_counter(int idx)
>> +{
>> +       uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> +       write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> +       isb();
>> +}
>> +
>> +static inline void disable_counter(int idx)
>> +{
>> +       uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> +       write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> +       isb();
>> +}
>> +
> As mentioned in [1], the current implementation of disable_counter()
> is buggy and would end up disabling all the counters.
> However, if you intend to keep it (even though it would remain unused),
> maybe change the definition to something like:
> 
> static inline void disable_counter(int idx)
> {
>     write_sysreg(BIT(idx), pmcntenclr_el0);
>     isb();
> }

Same thing for the enable_counter() function, by the way.
It doesn't have the same disastrous effect, but it is
buggy (imagine an interrupt disabling a counter between
the read and the write...).

In general, the set/clr registers should always be used
in their write form, never in a RMW form.
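
For illustration, a minimal sketch of what the write-only form could
look like (assuming the helpers keep their current names; untested):

static inline void enable_counter(int idx)
{
	/* Set/clr semantics: only the bits written as 1 take effect */
	write_sysreg(BIT(idx), pmcntenset_el0);
	isb();
}

static inline void disable_counter(int idx)
{
	write_sysreg(BIT(idx), pmcntenclr_el0);
	isb();
}

That way a concurrent update to another counter can never be undone by
a stale read-modify-write value.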

Thanks,

         M.
-- 
Jazz is not dead. It just smells funny...

Thread overview: 26+ messages
2023-11-23  6:37 [PATCH v1 0/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test Shaoqin Huang
2023-11-23  6:37 ` Shaoqin Huang
2023-11-23  6:37 ` [PATCH v1 1/3] KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() can be reused Shaoqin Huang
2023-11-23  6:37   ` Shaoqin Huang
2023-11-24 18:14   ` Eric Auger
2023-11-24 18:14     ` Eric Auger
2023-11-29  3:23     ` Shaoqin Huang
2023-11-29  3:23       ` Shaoqin Huang
2023-11-23  6:37 ` [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/ Shaoqin Huang
2023-11-23  6:37   ` Shaoqin Huang
2023-11-24 18:14   ` Eric Auger
2023-11-24 18:14     ` Eric Auger
2023-11-27 21:48   ` Raghavendra Rao Ananta
2023-11-27 21:48     ` Raghavendra Rao Ananta
2023-11-28  8:43     ` Marc Zyngier [this message]
2023-11-28  8:43       ` Marc Zyngier
2023-11-29  3:51       ` Shaoqin Huang
2023-11-29  3:51         ` Shaoqin Huang
2023-11-29  3:50     ` Shaoqin Huang
2023-11-29  3:50       ` Shaoqin Huang
2023-11-23  6:37 ` [PATCH v1 3/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test Shaoqin Huang
2023-11-23  6:37   ` Shaoqin Huang
2023-11-27  8:10   ` Eric Auger
2023-11-27  8:10     ` Eric Auger
2023-11-29  6:58     ` Shaoqin Huang
2023-11-29  6:58       ` Shaoqin Huang
