From: Raphael Gault <raphael.gault@arm.com>
To: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: mingo@redhat.com, peterz@infradead.org, catalin.marinas@arm.com, will.deacon@arm.com, acme@kernel.org, mark.rutland@arm.com, raph.gault+kdev@gmail.com, Raphael Gault <raphael.gault@arm.com>
Subject: [PATCH v4 6/7] arm64: perf: Enable pmu counter direct access for perf event on armv8
Date: Thu, 22 Aug 2019 15:42:19 +0100
Message-ID: <20190822144220.27860-7-raphael.gault@arm.com>
In-Reply-To: <20190822144220.27860-1-raphael.gault@arm.com>

Keep track of events opened with direct access to the hardware
counters and modify permissions while they are open.

The strategy used here is the same as the one used by x86: every time
an event is mapped, the permissions are set if required. The atomic
field added to the mm_context helps keep track of the number of events
opened and lets us deactivate the permissions when all of them are
unmapped.

We also need to update the permissions in the context switch code so
that tasks keep the right permissions.

Signed-off-by: Raphael Gault <raphael.gault@arm.com>
---
 arch/arm64/include/asm/mmu.h         |  6 ++++
 arch/arm64/include/asm/mmu_context.h |  2 ++
 arch/arm64/include/asm/perf_event.h  | 14 ++++++++
 arch/arm64/kernel/perf_event.c       |  1 +
 drivers/perf/arm_pmu.c               | 54 ++++++++++++++++++++++++++++
 5 files changed, 77 insertions(+)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index fd6161336653..88ed4466bd06 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -18,6 +18,12 @@

 typedef struct {
 	atomic64_t	id;
+
+	/*
+	 * non-zero if userspace has direct access to the
+	 * hardware counters.
+	 */
+	atomic_t	pmu_direct_access;
 	void		*vdso;
 	unsigned long	flags;
 } mm_context_t;
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 7ed0adb187a8..6e66ff940494 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -21,6 +21,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/perf_event.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>

@@ -224,6 +225,7 @@ static inline void __switch_mm(struct mm_struct *next)
 	}

 	check_and_switch_context(next, cpu);
+	perf_switch_user_access(next);
 }

 static inline void
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2bdbc79bbd01..ba58fa726631 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -8,6 +8,7 @@

 #include <asm/stack_pointer.h>
 #include <asm/ptrace.h>
+#include <linux/mm_types.h>

 #define	ARMV8_PMU_MAX_COUNTERS	32
 #define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
@@ -223,4 +224,17 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	(regs)->pstate = PSR_MODE_EL1h;	\
 }

+static inline void perf_switch_user_access(struct mm_struct *mm)
+{
+	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+		return;
+
+	if (atomic_read(&mm->context.pmu_direct_access)) {
+		write_sysreg(ARMV8_PMU_USERENR_ER|ARMV8_PMU_USERENR_CR,
+			     pmuserenr_el0);
+	} else {
+		write_sysreg(0, pmuserenr_el0);
+	}
+}
+
 #endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index de9b001e8b7c..7de56f22d038 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1285,6 +1285,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	freq = arch_timer_get_rate();
 	userpg->cap_user_time = 1;
+	userpg->cap_user_rdpmc = !!(event->hw.flags & ARMPMU_EL0_RD_CNTR);

 	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
 			NSEC_PER_SEC, 0);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d06b8095a19..d0d3e523a4c4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -25,6 +25,7 @@
 #include <linux/irqdesc.h>

 #include <asm/irq_regs.h>
+#include <asm/mmu_context.h>

 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
@@ -778,6 +779,57 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 					    &cpu_pmu->node);
 }

+static void refresh_pmuserenr(void *mm)
+{
+	perf_switch_user_access(mm);
+}
+
+static int check_homogeneous_cap(struct perf_event *event, struct mm_struct *mm)
+{
+	pr_info("checking HAS_HOMOGENEOUS_PMU");
+	if (!cpus_have_cap(ARM64_HAS_HOMOGENEOUS_PMU)) {
+		pr_info("Disable direct access (!HAS_HOMOGENEOUS_PMU)");
+		atomic_set(&mm->context.pmu_direct_access, 0);
+		on_each_cpu(refresh_pmuserenr, mm, 1);
+		event->hw.flags &= ~ARMPMU_EL0_RD_CNTR;
+		return 0;
+	}
+
+	return 1;
+}
+
+static void armpmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+	if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+		return;
+
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm. Otherwise one task could observe
+	 * pmu_direct_access > 1 and return all the way back to
+	 * userspace with user access disabled while another task is still
+	 * doing on_each_cpu_mask() to enable user access.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write. If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_write(&mm->mmap_sem);
+
+	if (check_homogeneous_cap(event, mm) &&
+	    atomic_inc_return(&mm->context.pmu_direct_access) == 1)
+		on_each_cpu(refresh_pmuserenr, mm, 1);
+}
+
+static void armpmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+	if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+		return;
+
+	if (check_homogeneous_cap(event, mm) &&
+	    atomic_dec_and_test(&mm->context.pmu_direct_access))
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pmuserenr, NULL, 1);
+}
+
 static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
 	struct arm_pmu *pmu;
@@ -799,6 +851,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		.pmu_enable	= armpmu_enable,
 		.pmu_disable	= armpmu_disable,
 		.event_init	= armpmu_event_init,
+		.event_mapped	= armpmu_event_mapped,
+		.event_unmapped	= armpmu_event_unmapped,
 		.add		= armpmu_add,
 		.del		= armpmu_del,
 		.start		= armpmu_start,
--
2.17.1
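
For context on what the ARMV8_PMU_USERENR_ER/CR bits written by
perf_switch_user_access() actually unlock: PMUSERENR_EL0.ER permits EL0
reads of the event counters via PMSELR_EL0/PMXEVCNTR_EL0, and
PMUSERENR_EL0.CR permits EL0 reads of PMCCNTR_EL0; with the bits clear,
those instructions trap. The accessors below are a minimal userspace
sketch of such reads, not code from this series, and the helper names
are made up for illustration:

	#include <stdint.h>

	/*
	 * Read hardware event counter <idx> directly from EL0. <idx> is
	 * the hardware counter number, i.e. the userpage index minus 1.
	 * Requires PMUSERENR_EL0.ER to be set by the kernel.
	 */
	static inline uint64_t armv8_read_evcntr(uint32_t idx)
	{
		uint64_t val;

		/* Select counter <idx>, synchronize, then read it. */
		asm volatile("msr pmselr_el0, %x1\n\t"
			     "isb\n\t"
			     "mrs %x0, pmxevcntr_el0"
			     : "=r" (val)
			     : "r" ((uint64_t)idx));
		return val;
	}

	/* Read the cycle counter from EL0 (requires PMUSERENR_EL0.CR). */
	static inline uint64_t armv8_read_cyccntr(void)
	{
		uint64_t val;

		asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
		return val;
	}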
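
A self-monitoring task is expected to pair such accessors with the
lock/index/offset protocol documented for struct perf_event_mmap_page,
which arch_perf_update_userpage() above feeds via cap_user_rdpmc. The
loop below is one plausible shape for that reader, assuming pmc_width
in the userpage is populated (otherwise the counter width must be
known out of band); read_event_count() and its use of the
armv8_read_evcntr() helper from the previous sketch are assumptions of
this example, not part of the patch:

	#include <linux/perf_event.h>
	#include <stdint.h>

	static uint64_t read_event_count(volatile struct perf_event_mmap_page *pc)
	{
		uint32_t seq, idx;
		uint64_t count;

		do {
			seq = pc->lock;
			__asm__ volatile("" ::: "memory");	/* compiler barrier */

			idx = pc->index;	/* hw counter number + 1, 0 if not scheduled */
			count = pc->offset;	/* kernel-accumulated base count */
			if (pc->cap_user_rdpmc && idx) {
				uint64_t pmc = armv8_read_evcntr(idx - 1);
				uint16_t width = pc->pmc_width;

				/* Sign-extend the value from the counter width. */
				count += (int64_t)(pmc << (64 - width)) >> (64 - width);
			}

			__asm__ volatile("" ::: "memory");
		} while (pc->lock != seq);	/* retry if the kernel updated the page */

		return count;
	}

If idx is 0 or cap_user_rdpmc is clear (for instance after
check_homogeneous_cap() strips ARMPMU_EL0_RD_CNTR on a heterogeneous
system), the caller must fall back to read(2) on the event fd.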