* [PATCH] LoongArch: Add perf events support
@ 2022-08-15 12:47 Huacai Chen
  2022-08-16  5:46 ` WANG Xuerui
  2022-08-16  8:59 ` kernel test robot
  0 siblings, 2 replies; 7+ messages in thread
From: Huacai Chen @ 2022-08-15 12:47 UTC (permalink / raw)
  To: Arnd Bergmann, Huacai Chen, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim
  Cc: loongarch, linux-arch, Xuefeng Li, Guo Ren, Xuerui Wang,
	Jiaxun Yang, linux-perf-users, linux-kernel, Huacai Chen

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
 arch/loongarch/Kconfig                      |   2 +
 arch/loongarch/include/uapi/asm/perf_regs.h |  40 +
 arch/loongarch/kernel/Makefile              |   2 +
 arch/loongarch/kernel/perf_event.c          | 909 ++++++++++++++++++++
 arch/loongarch/kernel/perf_regs.c           |  50 ++
 5 files changed, 1003 insertions(+)
 create mode 100644 arch/loongarch/include/uapi/asm/perf_regs.h
 create mode 100644 arch/loongarch/kernel/perf_event.c
 create mode 100644 arch/loongarch/kernel/perf_regs.c

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 24665808cf3d..9478f9646fa5 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -93,6 +93,8 @@ config LOONGARCH
 	select HAVE_NMI
 	select HAVE_PCI
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_REGS
+	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_SETUP_PER_CPU_AREA if NUMA
diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..9943d418e01d
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_LOONGARCH_PERF_REGS_H
+#define _ASM_LOONGARCH_PERF_REGS_H
+
+enum perf_event_loongarch_regs {
+	PERF_REG_LOONGARCH_PC,
+	PERF_REG_LOONGARCH_R1,
+	PERF_REG_LOONGARCH_R2,
+	PERF_REG_LOONGARCH_R3,
+	PERF_REG_LOONGARCH_R4,
+	PERF_REG_LOONGARCH_R5,
+	PERF_REG_LOONGARCH_R6,
+	PERF_REG_LOONGARCH_R7,
+	PERF_REG_LOONGARCH_R8,
+	PERF_REG_LOONGARCH_R9,
+	PERF_REG_LOONGARCH_R10,
+	PERF_REG_LOONGARCH_R11,
+	PERF_REG_LOONGARCH_R12,
+	PERF_REG_LOONGARCH_R13,
+	PERF_REG_LOONGARCH_R14,
+	PERF_REG_LOONGARCH_R15,
+	PERF_REG_LOONGARCH_R16,
+	PERF_REG_LOONGARCH_R17,
+	PERF_REG_LOONGARCH_R18,
+	PERF_REG_LOONGARCH_R19,
+	PERF_REG_LOONGARCH_R20,
+	PERF_REG_LOONGARCH_R21,
+	PERF_REG_LOONGARCH_R22,
+	PERF_REG_LOONGARCH_R23,
+	PERF_REG_LOONGARCH_R24,
+	PERF_REG_LOONGARCH_R25,
+	PERF_REG_LOONGARCH_R26,
+	PERF_REG_LOONGARCH_R27,
+	PERF_REG_LOONGARCH_R28,
+	PERF_REG_LOONGARCH_R29,
+	PERF_REG_LOONGARCH_R30,
+	PERF_REG_LOONGARCH_R31,
+	PERF_REG_LOONGARCH_MAX = PERF_REG_LOONGARCH_R31 + 1,
+};
+#endif /* _ASM_LOONGARCH_PERF_REGS_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index e5be17009fe8..a213e994db68 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_UNWINDER_GUESS)	+= unwind_guess.o
 obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
 
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_regs.o
+
 CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
new file mode 100644
index 000000000000..00cdbcebaf80
--- /dev/null
+++ b/arch/loongarch/kernel/perf_event.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux performance counter support for LoongArch.
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long
+user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
+{
+	struct stack_frame buftail;
+	unsigned long err;
+	unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(user_frame_tail, sizeof(buftail)))
+		return 0;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err || (unsigned long)user_frame_tail >= buftail.fp)
+		return 0;
+
+	perf_callchain_store(entry, buftail.ra);
+
+	return buftail.fp;
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
+{
+	unsigned long fp;
+
+	if (perf_guest_state()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->csr_era);
+
+	fp = regs->regs[22];
+
+	while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
+		fp = user_backtrace(entry, fp);
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
+{
+	struct unwind_state state;
+	unsigned long addr;
+
+	for (unwind_start(&state, current, regs);
+	      !unwind_done(&state); unwind_next_frame(&state)) {
+		addr = unwind_get_return_address(&state);
+		if (!addr || perf_callchain_store(entry, addr))
+			return;
+	}
+}
+
+#define LOONGARCH_MAX_HWEVENTS 4
+
+struct cpu_hw_events {
+	/* Array of events on this cpu. */
+	struct perf_event	*events[LOONGARCH_MAX_HWEVENTS];
+
+	/*
+	 * Set the bit (indexed by the counter number) when the counter
+	 * is used for an event.
+	 */
+	unsigned long		used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
+
+	/*
+	 * Software copy of the control register for each performance counter.
+	 * LoongArch CPUs vary in performance counters. They use this differently,
+	 * and even may not use it.
+	 */
+	unsigned int		saved_ctrl[LOONGARCH_MAX_HWEVENTS];
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+	.saved_ctrl = {0},
+};
+
+/* The description of LoongArch performance events. */
+struct loongarch_perf_event {
+	unsigned int event_id;
+};
+
+static struct loongarch_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define HW_OP_UNSUPPORTED		0xffffffff
+#define CACHE_OP_UNSUPPORTED		0xffffffff
+
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},	\
+	},								\
+}
+
+struct loongarch_pmu {
+	u64		max_period;
+	u64		valid_count;
+	u64		overflow;
+	const char	*name;
+	u64		(*read_counter)(unsigned int idx);
+	void		(*write_counter)(unsigned int idx, u64 val);
+	const struct loongarch_perf_event *(*map_raw_event)(u64 config);
+	const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+	const struct loongarch_perf_event (*cache_event_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX];
+	unsigned int	num_counters;
+};
+
+static struct loongarch_pmu loongarch_pmu;
+
+#define M_PERFCTL_EVENT(event)	(event & CSR_PERFCTRL_EVENT)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER	(CSR_PERFCTRL_PLV0 |	\
+					CSR_PERFCTRL_PLV1 |	\
+					CSR_PERFCTRL_PLV2 |	\
+					CSR_PERFCTRL_PLV3 |	\
+					CSR_PERFCTRL_IE)
+
+#define M_PERFCTL_CONFIG_MASK		0x1f0000
+
+#define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+static void resume_local_counters(void);
+static void pause_local_counters(void);
+
+static u64 loongarch_pmu_read_counter(unsigned int idx)
+{
+	u64 val = -1;
+
+	switch (idx) {
+	case 0:
+		val = read_csr_perfcntr0();
+		break;
+	case 1:
+		val = read_csr_perfcntr1();
+		break;
+	case 2:
+		val = read_csr_perfcntr2();
+		break;
+	case 3:
+		val = read_csr_perfcntr3();
+		break;
+	default:
+		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+		return 0;
+	}
+
+	return val;
+}
+
+static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
+{
+	switch (idx) {
+	case 0:
+		write_csr_perfcntr0(val);
+		return;
+	case 1:
+		write_csr_perfcntr1(val);
+		return;
+	case 2:
+		write_csr_perfcntr2(val);
+		return;
+	case 3:
+		write_csr_perfcntr3(val);
+		return;
+	}
+}
+
+static unsigned int loongarch_pmu_read_control(unsigned int idx)
+{
+	unsigned int val = -1;
+
+	switch (idx) {
+	case 0:
+		val = read_csr_perfctrl0();
+		break;
+	case 1:
+		val = read_csr_perfctrl1();
+		break;
+	case 2:
+		val = read_csr_perfctrl2();
+		break;
+	case 3:
+		val = read_csr_perfctrl3();
+		break;
+	default:
+		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+		return 0;
+	}
+
+	return val;
+}
+
+static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
+{
+	switch (idx) {
+	case 0:
+		write_csr_perfctrl0(val);
+		return;
+	case 1:
+		write_csr_perfctrl1(val);
+		return;
+	case 2:
+		write_csr_perfctrl2(val);
+		return;
+	case 3:
+		write_csr_perfctrl3(val);
+		return;
+	}
+}
+
+static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+				    struct hw_perf_event *hwc)
+{
+	int i;
+
+	for (i = loongarch_pmu.num_counters - 1; i >= 0; i--) {
+		if (!test_and_set_bit(i, cpuc->used_mask))
+			return i;
+	}
+
+	return -EAGAIN;
+}
+
+static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+	struct perf_event *event = container_of(evt, struct perf_event, hw);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	unsigned int cpu;
+
+	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
+		/* Make sure interrupt enabled. */
+		CSR_PERFCTRL_IE;
+
+	cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+
+	pr_debug("Enabling perf counter for CPU%d\n", cpu);
+	/*
+	 * We do not actually let the counter run. Leave it until start().
+	 */
+}
+
+static void loongarch_pmu_disable_event(int idx)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	unsigned long flags;
+
+	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+	local_irq_save(flags);
+	cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
+		~M_PERFCTL_COUNT_EVENT_WHENEVER;
+	loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+	local_irq_restore(flags);
+}
+
+static int loongarch_pmu_event_set_period(struct perf_event *event,
+				    struct hw_perf_event *hwc,
+				    int idx)
+{
+	u64 left = local64_read(&hwc->period_left);
+	u64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely((left + period) & (1ULL << 63))) {
+		/* left underflowed by more than period. */
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	} else	if (unlikely((left + period) <= period)) {
+		/* left underflowed by less than period. */
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (left > loongarch_pmu.max_period) {
+		left = loongarch_pmu.max_period;
+		local64_set(&hwc->period_left, left);
+	}
+
+	local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
+
+	loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+static void loongarch_pmu_event_update(struct perf_event *event,
+				 struct hw_perf_event *hwc,
+				 int idx)
+{
+	u64 delta;
+	u64 prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = loongarch_pmu.read_counter(idx);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+				new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = new_raw_count - prev_raw_count;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+}
+
+static void loongarch_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	loongarch_pmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	loongarch_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void loongarch_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		loongarch_pmu_disable_event(hwc->idx);
+		barrier();
+		loongarch_pmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int loongarch_pmu_add(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	perf_pmu_disable(event->pmu);
+
+	/* To look for a free counter for this event. */
+	idx = loongarch_pmu_alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	loongarch_pmu_disable_event(idx);
+	cpuc->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		loongarch_pmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static void loongarch_pmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+	loongarch_pmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static void loongarch_pmu_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
+
+	loongarch_pmu_event_update(event, hwc, hwc->idx);
+}
+
+static void loongarch_pmu_enable(struct pmu *pmu)
+{
+	resume_local_counters();
+}
+
+static void loongarch_pmu_disable(struct pmu *pmu)
+{
+	pause_local_counters();
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)loongarch_pmu.num_counters, 1);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+				    int idx, struct perf_sample_data *data,
+				    struct pt_regs *regs)
+{
+	struct perf_event *event = cpuc->events[idx];
+	struct hw_perf_event *hwc = &event->hw;
+
+	loongarch_pmu_event_update(event, hwc, idx);
+	data->period = event->hw.last_period;
+	if (!loongarch_pmu_event_set_period(event, hwc, idx))
+		return;
+
+	if (perf_event_overflow(event, data, regs))
+		loongarch_pmu_disable_event(idx);
+}
+
+static irqreturn_t pmu_handle_irq(int irq, void *dev)
+{
+	int handled = IRQ_NONE;
+	unsigned int counters = loongarch_pmu.num_counters;
+	u64 counter;
+	struct pt_regs *regs;
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * First we pause the local counters, so that when we are locked
+	 * here, the counters are all paused. When it gets locked due to
+	 * perf_disable(), the timer interrupt handler will be delayed.
+	 *
+	 * See also loongarch_pmu_start().
+	 */
+	pause_local_counters();
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0, 0);
+
+	switch (counters) {
+#define HANDLE_COUNTER(n)						\
+	case n + 1:							\
+		if (test_bit(n, cpuc->used_mask)) {			\
+			counter = loongarch_pmu.read_counter(n);	\
+			if (counter & loongarch_pmu.overflow) {		\
+				handle_associated_event(cpuc, n, &data, regs); \
+				handled = IRQ_HANDLED;			\
+			}						\
+		}
+	HANDLE_COUNTER(3)
+		fallthrough;
+	HANDLE_COUNTER(2)
+		fallthrough;
+	HANDLE_COUNTER(1)
+		fallthrough;
+	HANDLE_COUNTER(0)
+	}
+
+	resume_local_counters();
+
+	/*
+	 * Do all the work for the pending perf events. We can do this
+	 * in here because the performance counter interrupt is a regular
+	 * interrupt, not NMI.
+	 */
+	if (handled == IRQ_HANDLED)
+		irq_work_run();
+
+	return handled;
+}
+
+static int get_pmc_irq(void)
+{
+	struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+	if (d)
+		return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
+
+	return -EINVAL;
+}
+
+static int loongarch_pmu_event_init(struct perf_event *event)
+{
+	int r, irq;
+	unsigned long flags;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		/* Init it to avoid false validate_group */
+		event->hw.event_base = 0xffffffff;
+		return -ENOENT;
+	}
+
+	if (event->cpu >= 0 && !cpu_online(event->cpu))
+		return -ENODEV;
+
+	irq = get_pmc_irq();
+	flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
+	if (!atomic_inc_not_zero(&active_events)) {
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0) {
+			r = request_irq(irq, pmu_handle_irq,
+					flags, "Perf_PMU", &loongarch_pmu);
+			if (r < 0) {
+				pr_warn("PMU IRQ request failed\n");
+				return -ENODEV;
+			}
+		}
+		atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= loongarch_pmu_enable,
+	.pmu_disable	= loongarch_pmu_disable,
+	.event_init	= loongarch_pmu_event_init,
+	.add		= loongarch_pmu_add,
+	.del		= loongarch_pmu_del,
+	.start		= loongarch_pmu_start,
+	.stop		= loongarch_pmu_stop,
+	.read		= loongarch_pmu_read,
+};
+
+static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
+{
+	return (pev->event_id & 0xff);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
+{
+	const struct loongarch_perf_event *pev;
+
+	pev = &(*loongarch_pmu.general_event_map)[idx];
+
+	if (pev->event_id == HW_OP_UNSUPPORTED)
+		return ERR_PTR(-ENOENT);
+
+	return pev;
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	const struct loongarch_perf_event *pev;
+
+	cache_type = (config >> 0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_op = (config >> 8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	pev = &((*loongarch_pmu.cache_event_map)
+					[cache_type]
+					[cache_op]
+					[cache_result]);
+
+	if (pev->event_id == CACHE_OP_UNSUPPORTED)
+		return ERR_PTR(-ENOENT);
+
+	return pev;
+}
+
+static int validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cpu_hw_events fake_cpuc;
+
+	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+	if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+		return -EINVAL;
+
+	for_each_sibling_event(sibling, leader) {
+		if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+			return -EINVAL;
+	}
+
+	if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void reset_counters(void *arg)
+{
+	int counters = (int)(long)arg;
+
+	switch (counters) {
+	case 4:
+		loongarch_pmu_write_control(3, 0);
+		loongarch_pmu.write_counter(3, 0);
+		fallthrough;
+	case 3:
+		loongarch_pmu_write_control(2, 0);
+		loongarch_pmu.write_counter(2, 0);
+		fallthrough;
+	case 2:
+		loongarch_pmu_write_control(1, 0);
+		loongarch_pmu.write_counter(1, 0);
+		fallthrough;
+	case 1:
+		loongarch_pmu_write_control(0, 0);
+		loongarch_pmu.write_counter(0, 0);
+	}
+}
+
+static const struct loongarch_perf_event loongson_new_event_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
+};
+
+static const struct loongarch_perf_event loongson_new_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+[C(L1D)] = {
+	/*
+	 * Like some other architectures (e.g. ARM), the performance
+	 * counters don't differentiate between read and write
+	 * accesses/misses, so this isn't strictly correct, but it's the
+	 * best we can do. Writes and reads get combined.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x8 },
+		[C(RESULT_MISS)]	= { 0x9 },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x8 },
+		[C(RESULT_MISS)]	= { 0x9 },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { 0xaa },
+		[C(RESULT_MISS)]	= { 0xa9 },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x6 },
+		[C(RESULT_MISS)]	= { 0x7 },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0xc },
+		[C(RESULT_MISS)]	= { 0xd },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0xc },
+		[C(RESULT_MISS)]	= { 0xd },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)]    = { 0x3b },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x4 },
+		[C(RESULT_MISS)]	= { 0x3c },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x4 },
+		[C(RESULT_MISS)]	= { 0x3c },
+	},
+},
+[C(BPU)] = {
+	/* Using the same code for *HW_BRANCH* */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]  = { 0x02 },
+		[C(RESULT_MISS)]    = { 0x03 },
+	},
+},
+};
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	const struct loongarch_perf_event *pev;
+	int err;
+
+	/* Returning LoongArch event descriptor for generic perf event. */
+	if (PERF_TYPE_HARDWARE == event->attr.type) {
+		if (event->attr.config >= PERF_COUNT_HW_MAX)
+			return -EINVAL;
+		pev = loongarch_pmu_map_general_event(event->attr.config);
+	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+		pev = loongarch_pmu_map_cache_event(event->attr.config);
+	} else if (PERF_TYPE_RAW == event->attr.type) {
+		/* We are working on the global raw event. */
+		mutex_lock(&raw_event_mutex);
+		pev = loongarch_pmu.map_raw_event(event->attr.config);
+	} else {
+		/* The event type is not (yet) supported. */
+		return -EOPNOTSUPP;
+	}
+
+	if (IS_ERR(pev)) {
+		if (PERF_TYPE_RAW == event->attr.type)
+			mutex_unlock(&raw_event_mutex);
+		return PTR_ERR(pev);
+	}
+
+	/*
+	 * We allow max flexibility on how each individual counter shared
+	 * by the single CPU operates (the mode exclusion and the range).
+	 */
+	hwc->config_base = CSR_PERFCTRL_IE;
+
+	hwc->event_base = loongarch_pmu_perf_event_encode(pev);
+	if (PERF_TYPE_RAW == event->attr.type)
+		mutex_unlock(&raw_event_mutex);
+
+	if (!attr->exclude_user) {
+		hwc->config_base |= CSR_PERFCTRL_PLV3;
+		hwc->config_base |= CSR_PERFCTRL_PLV2;
+	}
+	if (!attr->exclude_kernel) {
+		hwc->config_base |= CSR_PERFCTRL_PLV0;
+	}
+	if (!attr->exclude_hv) {
+		hwc->config_base |= CSR_PERFCTRL_PLV1;
+	}
+
+	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+	/*
+	 * The event can belong to another cpu. We do not assign a local
+	 * counter for it for now.
+	 */
+	hwc->idx = -1;
+	hwc->config = 0;
+
+	if (!hwc->sample_period) {
+		hwc->sample_period  = loongarch_pmu.max_period;
+		hwc->last_period    = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	err = 0;
+	if (event->group_leader != event)
+		err = validate_group(event);
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (err)
+		event->destroy(event);
+
+	return err;
+}
+
+static void pause_local_counters(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int ctr = loongarch_pmu.num_counters;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	do {
+		ctr--;
+		cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
+		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+	} while (ctr > 0);
+	local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int ctr = loongarch_pmu.num_counters;
+
+	do {
+		ctr--;
+		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+	} while (ctr > 0);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
+{
+	raw_event.event_id = config & 0xff;
+
+	return &raw_event;
+}
+
+static int __init
+init_hw_perf_events(void)
+{
+	int counters = 4;
+
+	if (!cpu_has_pmp)
+		return -ENODEV;
+
+	pr_info("Performance counters: ");
+
+	loongarch_pmu.num_counters = counters;
+	loongarch_pmu.max_period = (1ULL << 63) - 1;
+	loongarch_pmu.valid_count = (1ULL << 63) - 1;
+	loongarch_pmu.overflow = 1ULL << 63;
+	loongarch_pmu.name = "loongarch/loongson64";
+	loongarch_pmu.read_counter = loongarch_pmu_read_counter;
+	loongarch_pmu.write_counter = loongarch_pmu_write_counter;
+	loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
+	loongarch_pmu.general_event_map = &loongson_new_event_map;
+	loongarch_pmu.cache_event_map = &loongson_new_cache_map;
+
+	on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
+		"CPU.\n", loongarch_pmu.name, counters, 64);
+
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+	return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
new file mode 100644
index 000000000000..a5e9768e8414
--- /dev/null
+++ b/arch/loongarch/kernel/perf_regs.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_32BIT
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+	return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* Must be CONFIG_64BIT */
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+	if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
+		return PERF_SAMPLE_REGS_ABI_32;
+	else
+		return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_32BIT */
+
+int perf_reg_validate(u64 mask)
+{
+	if (!mask)
+		return -EINVAL;
+	if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
+		return -EINVAL;
+	return 0;
+}
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+	if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
+		return 0;
+
+	if ((u32)idx == PERF_REG_LOONGARCH_PC)
+		return regs->csr_era;
+
+	return regs->regs[idx];
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+			struct pt_regs *regs)
+{
+	regs_user->regs = task_pt_regs(current);
+	regs_user->abi = perf_reg_abi(current);
+}
-- 
2.31.1



* Re: [PATCH] LoongArch: Add perf events support
  2022-08-15 12:47 [PATCH] LoongArch: Add perf events support Huacai Chen
@ 2022-08-16  5:46 ` WANG Xuerui
  2022-08-16  8:18   ` Huacai Chen
  2022-08-16  8:59 ` kernel test robot
  1 sibling, 1 reply; 7+ messages in thread
From: WANG Xuerui @ 2022-08-16  5:46 UTC (permalink / raw)
  To: Huacai Chen, Arnd Bergmann, Huacai Chen, Peter Zijlstra,
	Ingo Molnar, Arnaldo Carvalho de Melo, Mark Rutland,
	Alexander Shishkin, Jiri Olsa, Namhyung Kim
  Cc: loongarch, linux-arch, Xuefeng Li, Guo Ren, Jiaxun Yang,
	linux-perf-users, linux-kernel

On 2022/8/15 20:47, Huacai Chen wrote:
> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
> ---
>   arch/loongarch/Kconfig                      |   2 +
>   arch/loongarch/include/uapi/asm/perf_regs.h |  40 +
>   arch/loongarch/kernel/Makefile              |   2 +
>   arch/loongarch/kernel/perf_event.c          | 909 ++++++++++++++++++++
>   arch/loongarch/kernel/perf_regs.c           |  50 ++
>   5 files changed, 1003 insertions(+)
>   create mode 100644 arch/loongarch/include/uapi/asm/perf_regs.h
>   create mode 100644 arch/loongarch/kernel/perf_event.c
>   create mode 100644 arch/loongarch/kernel/perf_regs.c

The code seems mostly ripped from arch/mips/kernel/perf_event_mipsxx.c.
I reviewed about half of the code, then suddenly realized I might be
looking at MIPS code, given that some of the English strings there
seemed way too "natural"...

But unfortunately, at least for the 3A5000, whose micro-architecture is
largely shared with the MIPS-implementing 3A4000, it seems inevitable
that some of the more MIPS-looking logic gets involved. The
1st-generation LA privileged architecture is simply too MIPS-like, so if
we want any support for the 3A5000 we'd have to include this.

> 
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 24665808cf3d..9478f9646fa5 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -93,6 +93,8 @@ config LOONGARCH
>   	select HAVE_NMI
>   	select HAVE_PCI
>   	select HAVE_PERF_EVENTS
> +	select HAVE_PERF_REGS
> +	select HAVE_PERF_USER_STACK_DUMP
>   	select HAVE_REGS_AND_STACK_ACCESS_API
>   	select HAVE_RSEQ
>   	select HAVE_SETUP_PER_CPU_AREA if NUMA
> diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h
> new file mode 100644
> index 000000000000..9943d418e01d
> --- /dev/null
> +++ b/arch/loongarch/include/uapi/asm/perf_regs.h
> @@ -0,0 +1,40 @@
> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> +#ifndef _ASM_LOONGARCH_PERF_REGS_H
> +#define _ASM_LOONGARCH_PERF_REGS_H
> +
> +enum perf_event_loongarch_regs {
> +	PERF_REG_LOONGARCH_PC,
> +	PERF_REG_LOONGARCH_R1,
> +	PERF_REG_LOONGARCH_R2,
> +	PERF_REG_LOONGARCH_R3,
> +	PERF_REG_LOONGARCH_R4,
> +	PERF_REG_LOONGARCH_R5,
> +	PERF_REG_LOONGARCH_R6,
> +	PERF_REG_LOONGARCH_R7,
> +	PERF_REG_LOONGARCH_R8,
> +	PERF_REG_LOONGARCH_R9,
> +	PERF_REG_LOONGARCH_R10,
> +	PERF_REG_LOONGARCH_R11,
> +	PERF_REG_LOONGARCH_R12,
> +	PERF_REG_LOONGARCH_R13,
> +	PERF_REG_LOONGARCH_R14,
> +	PERF_REG_LOONGARCH_R15,
> +	PERF_REG_LOONGARCH_R16,
> +	PERF_REG_LOONGARCH_R17,
> +	PERF_REG_LOONGARCH_R18,
> +	PERF_REG_LOONGARCH_R19,
> +	PERF_REG_LOONGARCH_R20,
> +	PERF_REG_LOONGARCH_R21,
> +	PERF_REG_LOONGARCH_R22,
> +	PERF_REG_LOONGARCH_R23,
> +	PERF_REG_LOONGARCH_R24,
> +	PERF_REG_LOONGARCH_R25,
> +	PERF_REG_LOONGARCH_R26,
> +	PERF_REG_LOONGARCH_R27,
> +	PERF_REG_LOONGARCH_R28,
> +	PERF_REG_LOONGARCH_R29,
> +	PERF_REG_LOONGARCH_R30,
> +	PERF_REG_LOONGARCH_R31,
> +	PERF_REG_LOONGARCH_MAX = PERF_REG_LOONGARCH_R31 + 1,

No need for this "PERF_REG_LOONGARCH_R31 + 1" because it's what happens 
without the assignment anyway?
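
That is, this tail would be equivalent (sketch):

	PERF_REG_LOONGARCH_R31,
	PERF_REG_LOONGARCH_MAX,
};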

> +};
> +#endif /* _ASM_LOONGARCH_PERF_REGS_H */
> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> index e5be17009fe8..a213e994db68 100644
> --- a/arch/loongarch/kernel/Makefile
> +++ b/arch/loongarch/kernel/Makefile
> @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)		+= numa.o
>   obj-$(CONFIG_UNWINDER_GUESS)	+= unwind_guess.o
>   obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>   
> +obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_regs.o
> +
>   CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
> diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
> new file mode 100644
> index 000000000000..00cdbcebaf80
> --- /dev/null
> +++ b/arch/loongarch/kernel/perf_event.c
> @@ -0,0 +1,909 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Linux performance counter support for LoongArch.

Please indicate its MIPS origin and copyright info ;-)
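
For instance (a sketch only; the exact attribution lines should be
carried over from the MIPS original):

/*
 * Linux performance counter support for LoongArch.
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS: arch/mips/kernel/perf_event_mipsxx.c
 */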

> + *
> + * Copyright (C) 2022 Loongson Technology Corporation Limited
> + */
> +
> +#include <linux/cpumask.h>
> +#include <linux/interrupt.h>
> +#include <linux/smp.h>
> +#include <linux/kernel.h>
> +#include <linux/perf_event.h>
> +#include <linux/uaccess.h>
> +#include <linux/sched/task_stack.h>
> +
> +#include <asm/irq.h>
> +#include <asm/irq_regs.h>
> +#include <asm/stacktrace.h>
> +#include <asm/unwind.h>
> +
> +/*
> + * Get the return address for a single stackframe and return a pointer to the
> + * next frame tail.
> + */
> +static unsigned long
> +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
> +{
> +	struct stack_frame buftail;
> +	unsigned long err;
> +	unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
> +
> +	/* Also check accessibility of one struct frame_tail beyond */
> +	if (!access_ok(user_frame_tail, sizeof(buftail)))
> +		return 0;
> +
> +	pagefault_disable();
> +	err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
> +	pagefault_enable();
> +
> +	if (err || (unsigned long)user_frame_tail >= buftail.fp)
> +		return 0;
> +
> +	perf_callchain_store(entry, buftail.ra);
> +
> +	return buftail.fp;
> +}
> +
> +void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
> +			 struct pt_regs *regs)
> +{
> +	unsigned long fp;
> +
> +	if (perf_guest_state()) {
> +		/* We don't support guest os callchain now */
> +		return;
> +	}
> +
> +	perf_callchain_store(entry, regs->csr_era);
> +
> +	fp = regs->regs[22];
> +
> +	while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
> +		fp = user_backtrace(entry, fp);
> +}
> +
> +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
> +			   struct pt_regs *regs)
> +{
> +	struct unwind_state state;
> +	unsigned long addr;
> +
> +	for (unwind_start(&state, current, regs);
> +	      !unwind_done(&state); unwind_next_frame(&state)) {
> +		addr = unwind_get_return_address(&state);
> +		if (!addr || perf_callchain_store(entry, addr))
> +			return;
> +	}
> +}
> +
> +#define LOONGARCH_MAX_HWEVENTS 4
> +
> +struct cpu_hw_events {
> +	/* Array of events on this cpu. */
> +	struct perf_event	*events[LOONGARCH_MAX_HWEVENTS];
> +
> +	/*
> +	 * Set the bit (indexed by the counter number) when the counter
> +	 * is used for an event.
> +	 */
> +	unsigned long		used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
> +
> +	/*
> +	 * Software copy of the control register for each performance counter.
> +	 * LoongArch CPUs vary in performance counters. They use this differently,
> +	 * and even may not use it.

I can't easily make sense of the paragraph. "Software copy" could mean
"saved copy", but what do "use differently" and "even may not use it"
mean? For the latter I can't deduce whether it was originally "some even
may not exist" in someone's head, and for the former I can't imagine
what the possible cases are and why we would care.

Maybe explain a little bit more?

> +	 */
> +	unsigned int		saved_ctrl[LOONGARCH_MAX_HWEVENTS];
> +};
> +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
> +	.saved_ctrl = {0},
> +};
> +
> +/* The description of LoongArch performance events. */
> +struct loongarch_perf_event {
> +	unsigned int event_id;
> +};
> +
> +static struct loongarch_perf_event raw_event;
> +static DEFINE_MUTEX(raw_event_mutex);
> +
> +#define C(x) PERF_COUNT_HW_CACHE_##x
> +#define HW_OP_UNSUPPORTED		0xffffffff
> +#define CACHE_OP_UNSUPPORTED		0xffffffff
> +
> +#define PERF_MAP_ALL_UNSUPPORTED					\
> +	[0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
> +
> +#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
> +[0 ... C(MAX) - 1] = {							\
> +	[0 ... C(OP_MAX) - 1] = {					\
> +		[0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},	\
> +	},								\
> +}
> +
> +struct loongarch_pmu {
> +	u64		max_period;
> +	u64		valid_count;
> +	u64		overflow;
> +	const char	*name;
> +	u64		(*read_counter)(unsigned int idx);
> +	void		(*write_counter)(unsigned int idx, u64 val);
> +	const struct loongarch_perf_event *(*map_raw_event)(u64 config);
> +	const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
> +	const struct loongarch_perf_event (*cache_event_map)
> +				[PERF_COUNT_HW_CACHE_MAX]
> +				[PERF_COUNT_HW_CACHE_OP_MAX]
> +				[PERF_COUNT_HW_CACHE_RESULT_MAX];

Apparently general_event_map and cache_event_map are not function
pointers but pointers to arrays? The parenthesized declarators read
like function pointers at first glance, though the parens themselves
are needed for the pointer-to-array syntax.
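
A minimal sketch of the distinction, with a hypothetical field name:

	/* pointer to an array of events - what the patch declares */
	const struct loongarch_perf_event (*map)[PERF_COUNT_HW_MAX];

	/* array of pointers to events - what dropping the parens would declare */
	const struct loongarch_perf_event *map[PERF_COUNT_HW_MAX];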

> +	unsigned int	num_counters;
> +};
> +
> +static struct loongarch_pmu loongarch_pmu;
> +
> +#define M_PERFCTL_EVENT(event)	(event & CSR_PERFCTRL_EVENT)
> +
> +#define M_PERFCTL_COUNT_EVENT_WHENEVER	(CSR_PERFCTRL_PLV0 |	\
> +					CSR_PERFCTRL_PLV1 |	\
> +					CSR_PERFCTRL_PLV2 |	\
> +					CSR_PERFCTRL_PLV3 |	\
> +					CSR_PERFCTRL_IE)
> +
> +#define M_PERFCTL_CONFIG_MASK		0x1f0000
> +
> +#define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

Isn't this just GENMASK(n - 1, 0)?
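
For example, assuming <linux/bits.h> (GENMASK_ULL() rather than
GENMASK() so the mask is u64 regardless of BITS_PER_LONG):

	#define CNTR_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)	/* n in [1, 64] */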

> +
> +static void resume_local_counters(void);
> +static void pause_local_counters(void);
> +
> +static u64 loongarch_pmu_read_counter(unsigned int idx)
> +{
> +	u64 val = -1;
> +
> +	switch (idx) {
> +	case 0:
> +		val = read_csr_perfcntr0();
> +		break;
> +	case 1:
> +		val = read_csr_perfcntr1();
> +		break;
> +	case 2:
> +		val = read_csr_perfcntr2();
> +		break;
> +	case 3:
> +		val = read_csr_perfcntr3();
> +		break;
> +	default:
> +		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> +		return 0;
> +	}
> +
> +	return val;
> +}
> +
> +static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
> +{
> +	switch (idx) {
> +	case 0:
> +		write_csr_perfcntr0(val);
> +		return;
> +	case 1:
> +		write_csr_perfcntr1(val);
> +		return;
> +	case 2:
> +		write_csr_perfcntr2(val);
> +		return;
> +	case 3:
> +		write_csr_perfcntr3(val);
> +		return;

Want a default branch for this function, similar to the read case?
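
For example, mirroring the read case (sketch):

	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return;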

> +	}
> +}
> +
> +static unsigned int loongarch_pmu_read_control(unsigned int idx)
> +{
> +	unsigned int val = -1;
> +
> +	switch (idx) {
> +	case 0:
> +		val = read_csr_perfctrl0();
> +		break;
> +	case 1:
> +		val = read_csr_perfctrl1();
> +		break;
> +	case 2:
> +		val = read_csr_perfctrl2();
> +		break;
> +	case 3:
> +		val = read_csr_perfctrl3();
> +		break;
> +	default:
> +		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> +		return 0;
> +	}
> +
> +	return val;
> +}
> +
> +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
> +{
> +	switch (idx) {
> +	case 0:
> +		write_csr_perfctrl0(val);
> +		return;
> +	case 1:
> +		write_csr_perfctrl1(val);
> +		return;
> +	case 2:
> +		write_csr_perfctrl2(val);
> +		return;
> +	case 3:
> +		write_csr_perfctrl3(val);
> +		return;

Similarly here.

> +	}
> +}
> +
> +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc,
> +				    struct hw_perf_event *hwc)
> +{
> +	int i;
> +
> +	for (i = loongarch_pmu.num_counters - 1; i >= 0; i--) {
> +		if (!test_and_set_bit(i, cpuc->used_mask))
> +			return i;
> +	}
> +
> +	return -EAGAIN;
> +}
> +
> +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
> +{
> +	struct perf_event *event = container_of(evt, struct perf_event, hw);
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	unsigned int cpu;
> +
> +	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> +
> +	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
> +		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
> +		/* Make sure interrupt enabled. */
> +		CSR_PERFCTRL_IE;
> +
> +	cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
> +
> +	pr_debug("Enabling perf counter for CPU%d\n", cpu);
> +	/*
> +	 * We do not actually let the counter run. Leave it until start().
> +	 */
> +}
> +
> +static void loongarch_pmu_disable_event(int idx)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	unsigned long flags;
> +
> +	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> +
> +	local_irq_save(flags);
> +	cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
> +		~M_PERFCTL_COUNT_EVENT_WHENEVER;
> +	loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
> +	local_irq_restore(flags);
> +}
> +
> +static int loongarch_pmu_event_set_period(struct perf_event *event,
> +				    struct hw_perf_event *hwc,
> +				    int idx)
> +{
> +	u64 left = local64_read(&hwc->period_left);
> +	u64 period = hwc->sample_period;
> +	int ret = 0;
> +
> +	if (unlikely((left + period) & (1ULL << 63))) {
> +		/* left underflowed by more than period. */
> +		left = period;
> +		local64_set(&hwc->period_left, left);
> +		hwc->last_period = period;
> +		ret = 1;
> +	} else	if (unlikely((left + period) <= period)) {
> +		/* left underflowed by less than period. */
> +		left += period;
> +		local64_set(&hwc->period_left, left);
> +		hwc->last_period = period;
> +		ret = 1;
> +	}
> +
> +	if (left > loongarch_pmu.max_period) {
> +		left = loongarch_pmu.max_period;
> +		local64_set(&hwc->period_left, left);
> +	}
> +
> +	local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
> +
> +	loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
> +
> +	perf_event_update_userpage(event);
> +
> +	return ret;
> +}
> +
> +static void loongarch_pmu_event_update(struct perf_event *event,
> +				 struct hw_perf_event *hwc,
> +				 int idx)
> +{
> +	u64 delta;
> +	u64 prev_raw_count, new_raw_count;
> +
> +again:
> +	prev_raw_count = local64_read(&hwc->prev_count);
> +	new_raw_count = loongarch_pmu.read_counter(idx);
> +
> +	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> +				new_raw_count) != prev_raw_count)
> +		goto again;
> +
> +	delta = new_raw_count - prev_raw_count;
> +
> +	local64_add(delta, &event->count);
> +	local64_sub(delta, &hwc->period_left);
> +}
> +
> +static void loongarch_pmu_start(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +
> +	if (flags & PERF_EF_RELOAD)
> +		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
> +
> +	hwc->state = 0;
> +
> +	/* Set the period for the event. */
> +	loongarch_pmu_event_set_period(event, hwc, hwc->idx);
> +
> +	/* Enable the event. */
> +	loongarch_pmu_enable_event(hwc, hwc->idx);
> +}
> +
> +static void loongarch_pmu_stop(struct perf_event *event, int flags)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +
> +	if (!(hwc->state & PERF_HES_STOPPED)) {
> +		/* We are working on a local event. */
> +		loongarch_pmu_disable_event(hwc->idx);
> +		barrier();
> +		loongarch_pmu_event_update(event, hwc, hwc->idx);
> +		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +	}
> +}
> +
> +static int loongarch_pmu_add(struct perf_event *event, int flags)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx;
> +	int err = 0;
> +
> +	perf_pmu_disable(event->pmu);
> +
> +	/* To look for a free counter for this event. */
> +	idx = loongarch_pmu_alloc_counter(cpuc, hwc);
> +	if (idx < 0) {
> +		err = idx;
> +		goto out;
> +	}
> +
> +	/*
> +	 * If there is an event in the counter we are going to use then
> +	 * make sure it is disabled.
> +	 */
> +	event->hw.idx = idx;
> +	loongarch_pmu_disable_event(idx);
> +	cpuc->events[idx] = event;
> +
> +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +	if (flags & PERF_EF_START)
> +		loongarch_pmu_start(event, PERF_EF_RELOAD);
> +
> +	/* Propagate our changes to the userspace mapping. */
> +	perf_event_update_userpage(event);
> +
> +out:
> +	perf_pmu_enable(event->pmu);
> +	return err;
> +}
> +
> +static void loongarch_pmu_del(struct perf_event *event, int flags)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +
> +	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> +
> +	loongarch_pmu_stop(event, PERF_EF_UPDATE);
> +	cpuc->events[idx] = NULL;
> +	clear_bit(idx, cpuc->used_mask);
> +
> +	perf_event_update_userpage(event);
> +}
> +
> +static void loongarch_pmu_read(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +
> +	/* Don't read disabled counters! */
> +	if (hwc->idx < 0)
> +		return;
> +
> +	loongarch_pmu_event_update(event, hwc, hwc->idx);
> +}
> +
> +static void loongarch_pmu_enable(struct pmu *pmu)
> +{
> +	resume_local_counters();
> +}
> +
> +static void loongarch_pmu_disable(struct pmu *pmu)
> +{
> +	pause_local_counters();
> +}
> +
> +static atomic_t active_events = ATOMIC_INIT(0);
> +static DEFINE_MUTEX(pmu_reserve_mutex);
> +
> +static void reset_counters(void *arg);
> +static int __hw_perf_event_init(struct perf_event *event);
> +
> +static void hw_perf_event_destroy(struct perf_event *event)
> +{
> +	if (atomic_dec_and_mutex_lock(&active_events,
> +				&pmu_reserve_mutex)) {
> +		/*
> +		 * We must not call the destroy function with interrupts
> +		 * disabled.
> +		 */
> +		on_each_cpu(reset_counters,
> +			(void *)(long)loongarch_pmu.num_counters, 1);
> +		mutex_unlock(&pmu_reserve_mutex);
> +	}
> +}
> +
> +/* This is needed by specific irq handlers in perf_event_*.c */
> +static void handle_associated_event(struct cpu_hw_events *cpuc,
> +				    int idx, struct perf_sample_data *data,
> +				    struct pt_regs *regs)
> +{
> +	struct perf_event *event = cpuc->events[idx];
> +	struct hw_perf_event *hwc = &event->hw;
> +
> +	loongarch_pmu_event_update(event, hwc, idx);
> +	data->period = event->hw.last_period;
> +	if (!loongarch_pmu_event_set_period(event, hwc, idx))
> +		return;
> +
> +	if (perf_event_overflow(event, data, regs))
> +		loongarch_pmu_disable_event(idx);
> +}
> +
> +static irqreturn_t pmu_handle_irq(int irq, void *dev)
> +{
> +	int handled = IRQ_NONE;
> +	unsigned int counters = loongarch_pmu.num_counters;
> +	u64 counter;
> +	struct pt_regs *regs;
> +	struct perf_sample_data data;
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +
> +	/*
> +	 * First we pause the local counters, so that when we are locked
> +	 * here, the counters are all paused. When it gets locked due to
> +	 * perf_disable(), the timer interrupt handler will be delayed.
> +	 *
> +	 * See also loongarch_pmu_start().
> +	 */
> +	pause_local_counters();
> +
> +	regs = get_irq_regs();
> +
> +	perf_sample_data_init(&data, 0, 0);
> +
> +	switch (counters) {
> +#define HANDLE_COUNTER(n)						\
> +	case n + 1:							\
> +		if (test_bit(n, cpuc->used_mask)) {			\
> +			counter = loongarch_pmu.read_counter(n);	\
> +			if (counter & loongarch_pmu.overflow) {		\
> +				handle_associated_event(cpuc, n, &data, regs); \
> +				handled = IRQ_HANDLED;			\
> +			}						\
> +		}
> +	HANDLE_COUNTER(3)
> +		fallthrough;
> +	HANDLE_COUNTER(2)
> +		fallthrough;
> +	HANDLE_COUNTER(1)
> +		fallthrough;
> +	HANDLE_COUNTER(0)
> +	}
> +
> +	resume_local_counters();
> +
> +	/*
> +	 * Do all the work for the pending perf events. We can do this
> +	 * in here because the performance counter interrupt is a regular
> +	 * interrupt, not NMI.
> +	 */
> +	if (handled == IRQ_HANDLED)
> +		irq_work_run();
> +
> +	return handled;
> +}
> +
> +static int get_pmc_irq(void)
> +{
> +	struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
> +
> +	if (d)
> +		return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
> +
> +	return -EINVAL;
> +}
> +
> +static int loongarch_pmu_event_init(struct perf_event *event)
> +{
> +	int r, irq;
> +	unsigned long flags;
> +
> +	/* does not support taken branch sampling */
> +	if (has_branch_stack(event))
> +		return -EOPNOTSUPP;
> +
> +	switch (event->attr.type) {
> +	case PERF_TYPE_RAW:
> +	case PERF_TYPE_HARDWARE:
> +	case PERF_TYPE_HW_CACHE:
> +		break;
> +
> +	default:
> +		/* Init it to avoid false validate_group */
> +		event->hw.event_base = 0xffffffff;
> +		return -ENOENT;
> +	}
> +
> +	if (event->cpu >= 0 && !cpu_online(event->cpu))
> +		return -ENODEV;
> +
> +	irq = get_pmc_irq();
> +	flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
> +	if (!atomic_inc_not_zero(&active_events)) {
> +		mutex_lock(&pmu_reserve_mutex);
> +		if (atomic_read(&active_events) == 0) {
> +			r = request_irq(irq, pmu_handle_irq,
> +					flags, "Perf_PMU", &loongarch_pmu);
> +			if (r < 0) {
> +				pr_warn("PMU IRQ request failed\n");
> +				return -ENODEV;
> +			}
> +		}
> +		atomic_inc(&active_events);
> +		mutex_unlock(&pmu_reserve_mutex);
> +	}
> +
> +	return __hw_perf_event_init(event);
> +}
> +
> +static struct pmu pmu = {
> +	.pmu_enable	= loongarch_pmu_enable,
> +	.pmu_disable	= loongarch_pmu_disable,
> +	.event_init	= loongarch_pmu_event_init,
> +	.add		= loongarch_pmu_add,
> +	.del		= loongarch_pmu_del,
> +	.start		= loongarch_pmu_start,
> +	.stop		= loongarch_pmu_stop,
> +	.read		= loongarch_pmu_read,
> +};
> +
> +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
> +{
> +	return (pev->event_id & 0xff);
> +}
> +
> +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
> +{
> +	const struct loongarch_perf_event *pev;
> +
> +	pev = &(*loongarch_pmu.general_event_map)[idx];
> +
> +	if (pev->event_id == HW_OP_UNSUPPORTED)
> +		return ERR_PTR(-ENOENT);
> +
> +	return pev;
> +}
> +
> +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
> +{
> +	unsigned int cache_type, cache_op, cache_result;
> +	const struct loongarch_perf_event *pev;
> +
> +	cache_type = (config >> 0) & 0xff;
> +	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
> +		return ERR_PTR(-EINVAL);
> +
> +	cache_op = (config >> 8) & 0xff;
> +	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
> +		return ERR_PTR(-EINVAL);
> +
> +	cache_result = (config >> 16) & 0xff;
> +	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
> +		return ERR_PTR(-EINVAL);
> +
> +	pev = &((*loongarch_pmu.cache_event_map)
> +					[cache_type]
> +					[cache_op]
> +					[cache_result]);
> +
> +	if (pev->event_id == CACHE_OP_UNSUPPORTED)
> +		return ERR_PTR(-ENOENT);
> +
> +	return pev;
> +}
> +
> +static int validate_group(struct perf_event *event)
> +{
> +	struct perf_event *sibling, *leader = event->group_leader;
> +	struct cpu_hw_events fake_cpuc;
> +
> +	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
> +
> +	if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
> +		return -EINVAL;
> +
> +	for_each_sibling_event(sibling, leader) {
> +		if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
> +			return -EINVAL;
> +	}
> +
> +	if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static void reset_counters(void *arg)
> +{
> +	int counters = (int)(long)arg;
> +
> +	switch (counters) {
> +	case 4:
> +		loongarch_pmu_write_control(3, 0);
> +		loongarch_pmu.write_counter(3, 0);
> +		fallthrough;
> +	case 3:
> +		loongarch_pmu_write_control(2, 0);
> +		loongarch_pmu.write_counter(2, 0);
> +		fallthrough;
> +	case 2:
> +		loongarch_pmu_write_control(1, 0);
> +		loongarch_pmu.write_counter(1, 0);
> +		fallthrough;
> +	case 1:
> +		loongarch_pmu_write_control(0, 0);
> +		loongarch_pmu.write_counter(0, 0);
> +	}
> +}
> +
> +static const struct loongarch_perf_event loongson_new_event_map[PERF_COUNT_HW_MAX] = {
> +	PERF_MAP_ALL_UNSUPPORTED,
> +	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
> +	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
> +	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
> +	[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
> +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
> +	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
> +};
> +
> +static const struct loongarch_perf_event loongson_new_cache_map
> +				[PERF_COUNT_HW_CACHE_MAX]
> +				[PERF_COUNT_HW_CACHE_OP_MAX]
> +				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
> +PERF_CACHE_MAP_ALL_UNSUPPORTED,
> +[C(L1D)] = {
> +	/*
> +	 * Like some other architectures (e.g. ARM), the performance
> +	 * counters don't differentiate between read and write
> +	 * accesses/misses, so this isn't strictly correct, but it's the
> +	 * best we can do. Writes and reads get combined.
> +	 */
> +	[C(OP_READ)] = {
> +		[C(RESULT_ACCESS)]	= { 0x8 },
> +		[C(RESULT_MISS)]	= { 0x9 },
> +	},
> +	[C(OP_WRITE)] = {
> +		[C(RESULT_ACCESS)]	= { 0x8 },
> +		[C(RESULT_MISS)]	= { 0x9 },
> +	},
> +	[C(OP_PREFETCH)] = {
> +		[C(RESULT_ACCESS)]	= { 0xaa },
> +		[C(RESULT_MISS)]	= { 0xa9 },
> +	},
> +},
> +[C(L1I)] = {
> +	[C(OP_READ)] = {
> +		[C(RESULT_ACCESS)]	= { 0x6 },
> +		[C(RESULT_MISS)]	= { 0x7 },
> +	},
> +},
> +[C(LL)] = {
> +	[C(OP_READ)] = {
> +		[C(RESULT_ACCESS)]	= { 0xc },
> +		[C(RESULT_MISS)]	= { 0xd },
> +	},
> +	[C(OP_WRITE)] = {
> +		[C(RESULT_ACCESS)]	= { 0xc },
> +		[C(RESULT_MISS)]	= { 0xd },
> +	},
> +},
> +[C(ITLB)] = {
> +	[C(OP_READ)] = {
> +		[C(RESULT_MISS)]    = { 0x3b },
> +	},
> +},
> +[C(DTLB)] = {
> +	[C(OP_READ)] = {
> +		[C(RESULT_ACCESS)]	= { 0x4 },
> +		[C(RESULT_MISS)]	= { 0x3c },
> +	},
> +	[C(OP_WRITE)] = {
> +		[C(RESULT_ACCESS)]	= { 0x4 },
> +		[C(RESULT_MISS)]	= { 0x3c },
> +	},
> +},
> +[C(BPU)] = {
> +	/* Using the same code for *HW_BRANCH* */
> +	[C(OP_READ)] = {
> +		[C(RESULT_ACCESS)]  = { 0x02 },
> +		[C(RESULT_MISS)]    = { 0x03 },
> +	},
> +},
> +};
> +
> +static int __hw_perf_event_init(struct perf_event *event)
> +{
> +	struct perf_event_attr *attr = &event->attr;
> +	struct hw_perf_event *hwc = &event->hw;
> +	const struct loongarch_perf_event *pev;
> +	int err;
> +
> +	/* Returning LoongArch event descriptor for generic perf event. */
> +	if (PERF_TYPE_HARDWARE == event->attr.type) {
> +		if (event->attr.config >= PERF_COUNT_HW_MAX)
> +			return -EINVAL;
> +		pev = loongarch_pmu_map_general_event(event->attr.config);
> +	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
> +		pev = loongarch_pmu_map_cache_event(event->attr.config);
> +	} else if (PERF_TYPE_RAW == event->attr.type) {
> +		/* We are working on the global raw event. */
> +		mutex_lock(&raw_event_mutex);
> +		pev = loongarch_pmu.map_raw_event(event->attr.config);
> +	} else {
> +		/* The event type is not (yet) supported. */
> +		return -EOPNOTSUPP;
> +	}
> +
> +	if (IS_ERR(pev)) {
> +		if (PERF_TYPE_RAW == event->attr.type)
> +			mutex_unlock(&raw_event_mutex);
> +		return PTR_ERR(pev);
> +	}
> +
> +	/*
> +	 * We allow max flexibility on how each individual counter shared
> +	 * by the single CPU operates (the mode exclusion and the range).
> +	 */
> +	hwc->config_base = CSR_PERFCTRL_IE;
> +
> +	hwc->event_base = loongarch_pmu_perf_event_encode(pev);
> +	if (PERF_TYPE_RAW == event->attr.type)
> +		mutex_unlock(&raw_event_mutex);
> +
> +	if (!attr->exclude_user) {
> +		hwc->config_base |= CSR_PERFCTRL_PLV3;
> +		hwc->config_base |= CSR_PERFCTRL_PLV2;
> +	}
> +	if (!attr->exclude_kernel) {
> +		hwc->config_base |= CSR_PERFCTRL_PLV0;
> +	}
> +	if (!attr->exclude_hv) {
> +		hwc->config_base |= CSR_PERFCTRL_PLV1;
> +	}
> +
> +	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
> +	/*
> +	 * The event can belong to another cpu. We do not assign a local
> +	 * counter for it for now.
> +	 */
> +	hwc->idx = -1;
> +	hwc->config = 0;
> +
> +	if (!hwc->sample_period) {
> +		hwc->sample_period  = loongarch_pmu.max_period;
> +		hwc->last_period    = hwc->sample_period;
> +		local64_set(&hwc->period_left, hwc->sample_period);
> +	}
> +
> +	err = 0;
> +	if (event->group_leader != event)
> +		err = validate_group(event);
> +
> +	event->destroy = hw_perf_event_destroy;
> +
> +	if (err)
> +		event->destroy(event);
> +
> +	return err;
> +}
> +
> +static void pause_local_counters(void)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	int ctr = loongarch_pmu.num_counters;
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +	do {
> +		ctr--;
> +		cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
> +		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
> +					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
> +	} while (ctr > 0);
> +	local_irq_restore(flags);
> +}
> +
> +static void resume_local_counters(void)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	int ctr = loongarch_pmu.num_counters;
> +
> +	do {
> +		ctr--;
> +		loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
> +	} while (ctr > 0);
> +}
> +
> +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
> +{
> +	raw_event.event_id = config & 0xff;
> +
> +	return &raw_event;
> +}
> +
> +static int __init
> +init_hw_perf_events(void)
> +{
> +	int counters = 4;
> +
> +	if (!cpu_has_pmp)
> +		return -ENODEV;
> +
> +	pr_info("Performance counters: ");
> +
> +	loongarch_pmu.num_counters = counters;
> +	loongarch_pmu.max_period = (1ULL << 63) - 1;
> +	loongarch_pmu.valid_count = (1ULL << 63) - 1;
> +	loongarch_pmu.overflow = 1ULL << 63;
> +	loongarch_pmu.name = "loongarch/loongson64";
> +	loongarch_pmu.read_counter = loongarch_pmu_read_counter;
> +	loongarch_pmu.write_counter = loongarch_pmu_write_counter;
> +	loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
> +	loongarch_pmu.general_event_map = &loongson_new_event_map;
> +	loongarch_pmu.cache_event_map = &loongson_new_cache_map;
> +
> +	on_each_cpu(reset_counters, (void *)(long)counters, 1);
> +
> +	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
> +		"CPU.\n", loongarch_pmu.name, counters, 64);
> +
> +	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
> +
> +	return 0;
> +}
> +early_initcall(init_hw_perf_events);
> diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
> new file mode 100644
> index 000000000000..a5e9768e8414
> --- /dev/null
> +++ b/arch/loongarch/kernel/perf_regs.c
> @@ -0,0 +1,50 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2022 Loongson Technology Corporation Limited

And this file, too.

> + */
> +
> +#include <linux/perf_event.h>
> +
> +#include <asm/ptrace.h>
> +
> +#ifdef CONFIG_32BIT
> +u64 perf_reg_abi(struct task_struct *tsk)
> +{
> +	return PERF_SAMPLE_REGS_ABI_32;
> +}
> +#else /* Must be CONFIG_64BIT */
> +u64 perf_reg_abi(struct task_struct *tsk)
> +{
> +	if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
> +		return PERF_SAMPLE_REGS_ABI_32;
> +	else
> +		return PERF_SAMPLE_REGS_ABI_64;
> +}
> +#endif /* CONFIG_32BIT */
> +
> +int perf_reg_validate(u64 mask)
> +{
> +	if (!mask)
> +		return -EINVAL;
> +	if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +u64 perf_reg_value(struct pt_regs *regs, int idx)
> +{
> +	if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
> +		return 0;
> +
> +	if ((u32)idx == PERF_REG_LOONGARCH_PC)
> +		return regs->csr_era;
> +
> +	return regs->regs[idx];
> +}
> +
> +void perf_get_regs_user(struct perf_regs *regs_user,
> +			struct pt_regs *regs)
> +{
> +	regs_user->regs = task_pt_regs(current);
> +	regs_user->abi = perf_reg_abi(current);
> +}

-- 
WANG "xen0n" Xuerui

Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/



* Re: [PATCH] LoongArch: Add perf events support
  2022-08-16  5:46 ` WANG Xuerui
@ 2022-08-16  8:18   ` Huacai Chen
  2022-08-16 10:07     ` Qi Hu
  0 siblings, 1 reply; 7+ messages in thread
From: Huacai Chen @ 2022-08-16  8:18 UTC (permalink / raw)
  To: WANG Xuerui
  Cc: Huacai Chen, Arnd Bergmann, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, loongarch, linux-arch, Xuefeng Li,
	Guo Ren, Jiaxun Yang, linux-perf-users, LKML

 Hi, Xuerui,

On Tue, Aug 16, 2022 at 1:46 PM WANG Xuerui <kernel@xen0n.name> wrote:
>
> On 2022/8/15 20:47, Huacai Chen wrote:
> > Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
> > ---
> >   arch/loongarch/Kconfig                      |   2 +
> >   arch/loongarch/include/uapi/asm/perf_regs.h |  40 +
> >   arch/loongarch/kernel/Makefile              |   2 +
> >   arch/loongarch/kernel/perf_event.c          | 909 ++++++++++++++++++++
> >   arch/loongarch/kernel/perf_regs.c           |  50 ++
> >   5 files changed, 1003 insertions(+)
> >   create mode 100644 arch/loongarch/include/uapi/asm/perf_regs.h
> >   create mode 100644 arch/loongarch/kernel/perf_event.c
> >   create mode 100644 arch/loongarch/kernel/perf_regs.c
>
> The code seems mostly ripped from arch/mips/kernel/perf_event_mipsxx.c.
> I reviewed about half of the code and then suddenly realized I might be
> looking at MIPS code, given that some of the English strings there seemed
> way too "natural"...
>
> But unfortunately, at least for the 3A5000, whose micro-architecture is
> largely shared with the MIPS-implementing 3A4000, it seems inevitable to
> involve some of the more MIPS-looking logic. The 1st-generation LA
> privileged architecture is way too MIPS-like after all, so if we want
> any support for the 3A5000 we'd have to include this.
>
> >
> > diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> > index 24665808cf3d..9478f9646fa5 100644
> > --- a/arch/loongarch/Kconfig
> > +++ b/arch/loongarch/Kconfig
> > @@ -93,6 +93,8 @@ config LOONGARCH
> >       select HAVE_NMI
> >       select HAVE_PCI
> >       select HAVE_PERF_EVENTS
> > +     select HAVE_PERF_REGS
> > +     select HAVE_PERF_USER_STACK_DUMP
> >       select HAVE_REGS_AND_STACK_ACCESS_API
> >       select HAVE_RSEQ
> >       select HAVE_SETUP_PER_CPU_AREA if NUMA
> > diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h
> > new file mode 100644
> > index 000000000000..9943d418e01d
> > --- /dev/null
> > +++ b/arch/loongarch/include/uapi/asm/perf_regs.h
> > @@ -0,0 +1,40 @@
> > +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> > +#ifndef _ASM_LOONGARCH_PERF_REGS_H
> > +#define _ASM_LOONGARCH_PERF_REGS_H
> > +
> > +enum perf_event_loongarch_regs {
> > +     PERF_REG_LOONGARCH_PC,
> > +     PERF_REG_LOONGARCH_R1,
> > +     PERF_REG_LOONGARCH_R2,
> > +     PERF_REG_LOONGARCH_R3,
> > +     PERF_REG_LOONGARCH_R4,
> > +     PERF_REG_LOONGARCH_R5,
> > +     PERF_REG_LOONGARCH_R6,
> > +     PERF_REG_LOONGARCH_R7,
> > +     PERF_REG_LOONGARCH_R8,
> > +     PERF_REG_LOONGARCH_R9,
> > +     PERF_REG_LOONGARCH_R10,
> > +     PERF_REG_LOONGARCH_R11,
> > +     PERF_REG_LOONGARCH_R12,
> > +     PERF_REG_LOONGARCH_R13,
> > +     PERF_REG_LOONGARCH_R14,
> > +     PERF_REG_LOONGARCH_R15,
> > +     PERF_REG_LOONGARCH_R16,
> > +     PERF_REG_LOONGARCH_R17,
> > +     PERF_REG_LOONGARCH_R18,
> > +     PERF_REG_LOONGARCH_R19,
> > +     PERF_REG_LOONGARCH_R20,
> > +     PERF_REG_LOONGARCH_R21,
> > +     PERF_REG_LOONGARCH_R22,
> > +     PERF_REG_LOONGARCH_R23,
> > +     PERF_REG_LOONGARCH_R24,
> > +     PERF_REG_LOONGARCH_R25,
> > +     PERF_REG_LOONGARCH_R26,
> > +     PERF_REG_LOONGARCH_R27,
> > +     PERF_REG_LOONGARCH_R28,
> > +     PERF_REG_LOONGARCH_R29,
> > +     PERF_REG_LOONGARCH_R30,
> > +     PERF_REG_LOONGARCH_R31,
> > +     PERF_REG_LOONGARCH_MAX = PERF_REG_LOONGARCH_R31 + 1,
>
> No need for this "PERF_REG_LOONGARCH_R31 + 1" because it's what happens
> without the assignment anyway?
PERF_REG_LOONGARCH_MAX is used in perf_event.c

>
> > +};
> > +#endif /* _ASM_LOONGARCH_PERF_REGS_H */
> > diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> > index e5be17009fe8..a213e994db68 100644
> > --- a/arch/loongarch/kernel/Makefile
> > +++ b/arch/loongarch/kernel/Makefile
> > @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)          += numa.o
> >   obj-$(CONFIG_UNWINDER_GUESS)        += unwind_guess.o
> >   obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
> >
> > +obj-$(CONFIG_PERF_EVENTS)    += perf_event.o perf_regs.o
> > +
> >   CPPFLAGS_vmlinux.lds                := $(KBUILD_CFLAGS)
> > diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
> > new file mode 100644
> > index 000000000000..00cdbcebaf80
> > --- /dev/null
> > +++ b/arch/loongarch/kernel/perf_event.c
> > @@ -0,0 +1,909 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Linux performance counter support for LoongArch.
>
> Please indicate its MIPS origin and copyright info ;-)
OK, thanks.
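
Something like this at the top of perf_event.c should work (just a
sketch, exact wording to be finalized):

	/*
	 * Linux performance counter support for LoongArch.
	 *
	 * Copyright (C) 2022 Loongson Technology Corporation Limited
	 *
	 * Derived from MIPS: arch/mips/kernel/perf_event_mipsxx.c
	 */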

>
> > + *
> > + * Copyright (C) 2022 Loongson Technology Corporation Limited
> > + */
> > +
> > +#include <linux/cpumask.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/smp.h>
> > +#include <linux/kernel.h>
> > +#include <linux/perf_event.h>
> > +#include <linux/uaccess.h>
> > +#include <linux/sched/task_stack.h>
> > +
> > +#include <asm/irq.h>
> > +#include <asm/irq_regs.h>
> > +#include <asm/stacktrace.h>
> > +#include <asm/unwind.h>
> > +
> > +/*
> > + * Get the return address for a single stackframe and return a pointer to the
> > + * next frame tail.
> > + */
> > +static unsigned long
> > +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
> > +{
> > +     struct stack_frame buftail;
> > +     unsigned long err;
> > +     unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
> > +
> > +     /* Also check accessibility of one struct frame_tail beyond */
> > +     if (!access_ok(user_frame_tail, sizeof(buftail)))
> > +             return 0;
> > +
> > +     pagefault_disable();
> > +     err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
> > +     pagefault_enable();
> > +
> > +     if (err || (unsigned long)user_frame_tail >= buftail.fp)
> > +             return 0;
> > +
> > +     perf_callchain_store(entry, buftail.ra);
> > +
> > +     return buftail.fp;
> > +}
> > +
> > +void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
> > +                      struct pt_regs *regs)
> > +{
> > +     unsigned long fp;
> > +
> > +     if (perf_guest_state()) {
> > +             /* We don't support guest os callchain now */
> > +             return;
> > +     }
> > +
> > +     perf_callchain_store(entry, regs->csr_era);
> > +
> > +     fp = regs->regs[22];
> > +
> > +     while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
> > +             fp = user_backtrace(entry, fp);
> > +}
> > +
> > +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
> > +                        struct pt_regs *regs)
> > +{
> > +     struct unwind_state state;
> > +     unsigned long addr;
> > +
> > +     for (unwind_start(&state, current, regs);
> > +           !unwind_done(&state); unwind_next_frame(&state)) {
> > +             addr = unwind_get_return_address(&state);
> > +             if (!addr || perf_callchain_store(entry, addr))
> > +                     return;
> > +     }
> > +}
> > +
> > +#define LOONGARCH_MAX_HWEVENTS 4
> > +
> > +struct cpu_hw_events {
> > +     /* Array of events on this cpu. */
> > +     struct perf_event       *events[LOONGARCH_MAX_HWEVENTS];
> > +
> > +     /*
> > +      * Set the bit (indexed by the counter number) when the counter
> > +      * is used for an event.
> > +      */
> > +     unsigned long           used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
> > +
> > +     /*
> > +      * Software copy of the control register for each performance counter.
> > +      * LoongArch CPUs vary in performance counters. They use this differently,
> > +      * and even may not use it.
>
> I can't easily make sense of the paragraph. "Software copy" could mean
> "Saved copy", but what do "use differently" and "even may not use it"
> mean? For the latter I can't tell whether it was originally "some even
> may not exist" in someone's head, and for the former I can't imagine
> what the possible cases are or why we would care.
>
> Maybe explain a little bit more?
I think we should remove those useless lines.
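
I.e. keep only the first sentence (sketch):

	/*
	 * Software copy of the control register for each performance counter.
	 */
	unsigned int		saved_ctrl[LOONGARCH_MAX_HWEVENTS];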

>
> > +      */
> > +     unsigned int            saved_ctrl[LOONGARCH_MAX_HWEVENTS];
> > +};
> > +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
> > +     .saved_ctrl = {0},
> > +};
> > +
> > +/* The description of LoongArch performance events. */
> > +struct loongarch_perf_event {
> > +     unsigned int event_id;
> > +};
> > +
> > +static struct loongarch_perf_event raw_event;
> > +static DEFINE_MUTEX(raw_event_mutex);
> > +
> > +#define C(x) PERF_COUNT_HW_CACHE_##x
> > +#define HW_OP_UNSUPPORTED            0xffffffff
> > +#define CACHE_OP_UNSUPPORTED         0xffffffff
> > +
> > +#define PERF_MAP_ALL_UNSUPPORTED                                     \
> > +     [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
> > +
> > +#define PERF_CACHE_MAP_ALL_UNSUPPORTED                                       \
> > +[0 ... C(MAX) - 1] = {                                                       \
> > +     [0 ... C(OP_MAX) - 1] = {                                       \
> > +             [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},     \
> > +     },                                                              \
> > +}
> > +
> > +struct loongarch_pmu {
> > +     u64             max_period;
> > +     u64             valid_count;
> > +     u64             overflow;
> > +     const char      *name;
> > +     u64             (*read_counter)(unsigned int idx);
> > +     void            (*write_counter)(unsigned int idx, u64 val);
> > +     const struct loongarch_perf_event *(*map_raw_event)(u64 config);
> > +     const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
> > +     const struct loongarch_perf_event (*cache_event_map)
> > +                             [PERF_COUNT_HW_CACHE_MAX]
> > +                             [PERF_COUNT_HW_CACHE_OP_MAX]
> > +                             [PERF_COUNT_HW_CACHE_RESULT_MAX];
>
> Apparently general_event_map and cache_event_map are not function
> pointers? So the parens around the field name should be removed.
They are not function pointers but pointers to arrays, and the parens are
what make them so: removing the parens would turn the fields into arrays
of pointers, which is not what we want.
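
For illustration (not from the patch):

	/* A pointer to an array of PERF_COUNT_HW_MAX events (what the code uses): */
	const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];

	/* Dropping the parens would instead declare an array of pointers: */
	const struct loongarch_perf_event *general_event_map[PERF_COUNT_HW_MAX];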

>
> > +     unsigned int    num_counters;
> > +};
> > +
> > +static struct loongarch_pmu loongarch_pmu;
> > +
> > +#define M_PERFCTL_EVENT(event)       (event & CSR_PERFCTRL_EVENT)
> > +
> > +#define M_PERFCTL_COUNT_EVENT_WHENEVER       (CSR_PERFCTRL_PLV0 |    \
> > +                                     CSR_PERFCTRL_PLV1 |     \
> > +                                     CSR_PERFCTRL_PLV2 |     \
> > +                                     CSR_PERFCTRL_PLV3 |     \
> > +                                     CSR_PERFCTRL_IE)
> > +
> > +#define M_PERFCTL_CONFIG_MASK                0x1f0000
> > +
> > +#define CNTR_BIT_MASK(n)     (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
>
> Isn't this just GENMASK(n - 1, 0)?
Yes, you are right.
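
Will change it to something like this (untested sketch; GENMASK_ULL() from
<linux/bits.h> keeps the unsigned long long type of the original, and
GENMASK_ULL(63, 0) still evaluates to ~0ULL for the n == 64 case):

	#define CNTR_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)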

>
> > +
> > +static void resume_local_counters(void);
> > +static void pause_local_counters(void);
> > +
> > +static u64 loongarch_pmu_read_counter(unsigned int idx)
> > +{
> > +     u64 val = -1;
> > +
> > +     switch (idx) {
> > +     case 0:
> > +             val = read_csr_perfcntr0();
> > +             break;
> > +     case 1:
> > +             val = read_csr_perfcntr1();
> > +             break;
> > +     case 2:
> > +             val = read_csr_perfcntr2();
> > +             break;
> > +     case 3:
> > +             val = read_csr_perfcntr3();
> > +             break;
> > +     default:
> > +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> > +             return 0;
> > +     }
> > +
> > +     return val;
> > +}
> > +
> > +static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
> > +{
> > +     switch (idx) {
> > +     case 0:
> > +             write_csr_perfcntr0(val);
> > +             return;
> > +     case 1:
> > +             write_csr_perfcntr1(val);
> > +             return;
> > +     case 2:
> > +             write_csr_perfcntr2(val);
> > +             return;
> > +     case 3:
> > +             write_csr_perfcntr3(val);
> > +             return;
>
> Want a default branch for this function, similar to the read case?
Yes, that is needed.
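
Something like this, mirroring the read side (untested), and the same for
loongarch_pmu_write_control() below:

	case 3:
		write_csr_perfcntr3(val);
		return;
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return;
	}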

>
> > +     }
> > +}
> > +
> > +static unsigned int loongarch_pmu_read_control(unsigned int idx)
> > +{
> > +     unsigned int val = -1;
> > +
> > +     switch (idx) {
> > +     case 0:
> > +             val = read_csr_perfctrl0();
> > +             break;
> > +     case 1:
> > +             val = read_csr_perfctrl1();
> > +             break;
> > +     case 2:
> > +             val = read_csr_perfctrl2();
> > +             break;
> > +     case 3:
> > +             val = read_csr_perfctrl3();
> > +             break;
> > +     default:
> > +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> > +             return 0;
> > +     }
> > +
> > +     return val;
> > +}
> > +
> > +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
> > +{
> > +     switch (idx) {
> > +     case 0:
> > +             write_csr_perfctrl0(val);
> > +             return;
> > +     case 1:
> > +             write_csr_perfctrl1(val);
> > +             return;
> > +     case 2:
> > +             write_csr_perfctrl2(val);
> > +             return;
> > +     case 3:
> > +             write_csr_perfctrl3(val);
> > +             return;
>
> Similarly here.
>
> > +     }
> > +}
> > +
> > +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc,
> > +                                 struct hw_perf_event *hwc)
> > +{
> > +     int i;
> > +
> > +     for (i = loongarch_pmu.num_counters - 1; i >= 0; i--) {
> > +             if (!test_and_set_bit(i, cpuc->used_mask))
> > +                     return i;
> > +     }
> > +
> > +     return -EAGAIN;
> > +}
> > +
> > +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
> > +{
> > +     struct perf_event *event = container_of(evt, struct perf_event, hw);
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     unsigned int cpu;
> > +
> > +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> > +
> > +     cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
> > +             (evt->config_base & M_PERFCTL_CONFIG_MASK) |
> > +             /* Make sure interrupt enabled. */
> > +             CSR_PERFCTRL_IE;
> > +
> > +     cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
> > +
> > +     pr_debug("Enabling perf counter for CPU%d\n", cpu);
> > +     /*
> > +      * We do not actually let the counter run. Leave it until start().
> > +      */
> > +}
> > +
> > +static void loongarch_pmu_disable_event(int idx)
> > +{
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     unsigned long flags;
> > +
> > +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> > +
> > +     local_irq_save(flags);
> > +     cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
> > +             ~M_PERFCTL_COUNT_EVENT_WHENEVER;
> > +     loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
> > +     local_irq_restore(flags);
> > +}
> > +
> > +static int loongarch_pmu_event_set_period(struct perf_event *event,
> > +                                 struct hw_perf_event *hwc,
> > +                                 int idx)
> > +{
> > +     u64 left = local64_read(&hwc->period_left);
> > +     u64 period = hwc->sample_period;
> > +     int ret = 0;
> > +
> > +     if (unlikely((left + period) & (1ULL << 63))) {
> > +             /* left underflowed by more than period. */
> > +             left = period;
> > +             local64_set(&hwc->period_left, left);
> > +             hwc->last_period = period;
> > +             ret = 1;
> > +     } else  if (unlikely((left + period) <= period)) {
> > +             /* left underflowed by less than period. */
> > +             left += period;
> > +             local64_set(&hwc->period_left, left);
> > +             hwc->last_period = period;
> > +             ret = 1;
> > +     }
> > +
> > +     if (left > loongarch_pmu.max_period) {
> > +             left = loongarch_pmu.max_period;
> > +             local64_set(&hwc->period_left, left);
> > +     }
> > +
> > +     local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
> > +
> > +     loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
> > +
> > +     perf_event_update_userpage(event);
> > +
> > +     return ret;
> > +}
> > +
> > +static void loongarch_pmu_event_update(struct perf_event *event,
> > +                              struct hw_perf_event *hwc,
> > +                              int idx)
> > +{
> > +     u64 delta;
> > +     u64 prev_raw_count, new_raw_count;
> > +
> > +again:
> > +     prev_raw_count = local64_read(&hwc->prev_count);
> > +     new_raw_count = loongarch_pmu.read_counter(idx);
> > +
> > +     if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> > +                             new_raw_count) != prev_raw_count)
> > +             goto again;
> > +
> > +     delta = new_raw_count - prev_raw_count;
> > +
> > +     local64_add(delta, &event->count);
> > +     local64_sub(delta, &hwc->period_left);
> > +}
> > +
> > +static void loongarch_pmu_start(struct perf_event *event, int flags)
> > +{
> > +     struct hw_perf_event *hwc = &event->hw;
> > +
> > +     if (flags & PERF_EF_RELOAD)
> > +             WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
> > +
> > +     hwc->state = 0;
> > +
> > +     /* Set the period for the event. */
> > +     loongarch_pmu_event_set_period(event, hwc, hwc->idx);
> > +
> > +     /* Enable the event. */
> > +     loongarch_pmu_enable_event(hwc, hwc->idx);
> > +}
> > +
> > +static void loongarch_pmu_stop(struct perf_event *event, int flags)
> > +{
> > +     struct hw_perf_event *hwc = &event->hw;
> > +
> > +     if (!(hwc->state & PERF_HES_STOPPED)) {
> > +             /* We are working on a local event. */
> > +             loongarch_pmu_disable_event(hwc->idx);
> > +             barrier();
> > +             loongarch_pmu_event_update(event, hwc, hwc->idx);
> > +             hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> > +     }
> > +}
> > +
> > +static int loongarch_pmu_add(struct perf_event *event, int flags)
> > +{
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     struct hw_perf_event *hwc = &event->hw;
> > +     int idx;
> > +     int err = 0;
> > +
> > +     perf_pmu_disable(event->pmu);
> > +
> > +     /* To look for a free counter for this event. */
> > +     idx = loongarch_pmu_alloc_counter(cpuc, hwc);
> > +     if (idx < 0) {
> > +             err = idx;
> > +             goto out;
> > +     }
> > +
> > +     /*
> > +      * If there is an event in the counter we are going to use then
> > +      * make sure it is disabled.
> > +      */
> > +     event->hw.idx = idx;
> > +     loongarch_pmu_disable_event(idx);
> > +     cpuc->events[idx] = event;
> > +
> > +     hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> > +     if (flags & PERF_EF_START)
> > +             loongarch_pmu_start(event, PERF_EF_RELOAD);
> > +
> > +     /* Propagate our changes to the userspace mapping. */
> > +     perf_event_update_userpage(event);
> > +
> > +out:
> > +     perf_pmu_enable(event->pmu);
> > +     return err;
> > +}
> > +
> > +static void loongarch_pmu_del(struct perf_event *event, int flags)
> > +{
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     struct hw_perf_event *hwc = &event->hw;
> > +     int idx = hwc->idx;
> > +
> > +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> > +
> > +     loongarch_pmu_stop(event, PERF_EF_UPDATE);
> > +     cpuc->events[idx] = NULL;
> > +     clear_bit(idx, cpuc->used_mask);
> > +
> > +     perf_event_update_userpage(event);
> > +}
> > +
> > +static void loongarch_pmu_read(struct perf_event *event)
> > +{
> > +     struct hw_perf_event *hwc = &event->hw;
> > +
> > +     /* Don't read disabled counters! */
> > +     if (hwc->idx < 0)
> > +             return;
> > +
> > +     loongarch_pmu_event_update(event, hwc, hwc->idx);
> > +}
> > +
> > +static void loongarch_pmu_enable(struct pmu *pmu)
> > +{
> > +     resume_local_counters();
> > +}
> > +
> > +static void loongarch_pmu_disable(struct pmu *pmu)
> > +{
> > +     pause_local_counters();
> > +}
> > +
> > +static atomic_t active_events = ATOMIC_INIT(0);
> > +static DEFINE_MUTEX(pmu_reserve_mutex);
> > +
> > +static void reset_counters(void *arg);
> > +static int __hw_perf_event_init(struct perf_event *event);
> > +
> > +static void hw_perf_event_destroy(struct perf_event *event)
> > +{
> > +     if (atomic_dec_and_mutex_lock(&active_events,
> > +                             &pmu_reserve_mutex)) {
> > +             /*
> > +              * We must not call the destroy function with interrupts
> > +              * disabled.
> > +              */
> > +             on_each_cpu(reset_counters,
> > +                     (void *)(long)loongarch_pmu.num_counters, 1);
> > +             mutex_unlock(&pmu_reserve_mutex);
> > +     }
> > +}
> > +
> > +/* This is needed by specific irq handlers in perf_event_*.c */
> > +static void handle_associated_event(struct cpu_hw_events *cpuc,
> > +                                 int idx, struct perf_sample_data *data,
> > +                                 struct pt_regs *regs)
> > +{
> > +     struct perf_event *event = cpuc->events[idx];
> > +     struct hw_perf_event *hwc = &event->hw;
> > +
> > +     loongarch_pmu_event_update(event, hwc, idx);
> > +     data->period = event->hw.last_period;
> > +     if (!loongarch_pmu_event_set_period(event, hwc, idx))
> > +             return;
> > +
> > +     if (perf_event_overflow(event, data, regs))
> > +             loongarch_pmu_disable_event(idx);
> > +}
> > +
> > +static irqreturn_t pmu_handle_irq(int irq, void *dev)
> > +{
> > +     int handled = IRQ_NONE;
> > +     unsigned int counters = loongarch_pmu.num_counters;
> > +     u64 counter;
> > +     struct pt_regs *regs;
> > +     struct perf_sample_data data;
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +
> > +     /*
> > +      * First we pause the local counters, so that when we are locked
> > +      * here, the counters are all paused. When it gets locked due to
> > +      * perf_disable(), the timer interrupt handler will be delayed.
> > +      *
> > +      * See also loongarch_pmu_start().
> > +      */
> > +     pause_local_counters();
> > +
> > +     regs = get_irq_regs();
> > +
> > +     perf_sample_data_init(&data, 0, 0);
> > +
> > +     switch (counters) {
> > +#define HANDLE_COUNTER(n)                                            \
> > +     case n + 1:                                                     \
> > +             if (test_bit(n, cpuc->used_mask)) {                     \
> > +                     counter = loongarch_pmu.read_counter(n);        \
> > +                     if (counter & loongarch_pmu.overflow) {         \
> > +                             handle_associated_event(cpuc, n, &data, regs); \
> > +                             handled = IRQ_HANDLED;                  \
> > +                     }                                               \
> > +             }
> > +     HANDLE_COUNTER(3)
> > +             fallthrough;
> > +     HANDLE_COUNTER(2)
> > +             fallthrough;
> > +     HANDLE_COUNTER(1)
> > +             fallthrough;
> > +     HANDLE_COUNTER(0)
> > +     }
> > +
> > +     resume_local_counters();
> > +
> > +     /*
> > +      * Do all the work for the pending perf events. We can do this
> > +      * in here because the performance counter interrupt is a regular
> > +      * interrupt, not NMI.
> > +      */
> > +     if (handled == IRQ_HANDLED)
> > +             irq_work_run();
> > +
> > +     return handled;
> > +}
> > +
> > +static int get_pmc_irq(void)
> > +{
> > +     struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
> > +
> > +     if (d)
> > +             return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
> > +
> > +     return -EINVAL;
> > +}
> > +
> > +static int loongarch_pmu_event_init(struct perf_event *event)
> > +{
> > +     int r, irq;
> > +     unsigned long flags;
> > +
> > +     /* does not support taken branch sampling */
> > +     if (has_branch_stack(event))
> > +             return -EOPNOTSUPP;
> > +
> > +     switch (event->attr.type) {
> > +     case PERF_TYPE_RAW:
> > +     case PERF_TYPE_HARDWARE:
> > +     case PERF_TYPE_HW_CACHE:
> > +             break;
> > +
> > +     default:
> > +             /* Init it to avoid false validate_group */
> > +             event->hw.event_base = 0xffffffff;
> > +             return -ENOENT;
> > +     }
> > +
> > +     if (event->cpu >= 0 && !cpu_online(event->cpu))
> > +             return -ENODEV;
> > +
> > +     irq = get_pmc_irq();
> > +     flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
> > +     if (!atomic_inc_not_zero(&active_events)) {
> > +             mutex_lock(&pmu_reserve_mutex);
> > +             if (atomic_read(&active_events) == 0) {
> > +                     r = request_irq(irq, pmu_handle_irq,
> > +                                     flags, "Perf_PMU", &loongarch_pmu);
> > +                     if (r < 0) {
> > +                             pr_warn("PMU IRQ request failed\n");
> > +                             return -ENODEV;
> > +                     }
> > +             }
> > +             atomic_inc(&active_events);
> > +             mutex_unlock(&pmu_reserve_mutex);
> > +     }
> > +
> > +     return __hw_perf_event_init(event);
> > +}
> > +
> > +static struct pmu pmu = {
> > +     .pmu_enable     = loongarch_pmu_enable,
> > +     .pmu_disable    = loongarch_pmu_disable,
> > +     .event_init     = loongarch_pmu_event_init,
> > +     .add            = loongarch_pmu_add,
> > +     .del            = loongarch_pmu_del,
> > +     .start          = loongarch_pmu_start,
> > +     .stop           = loongarch_pmu_stop,
> > +     .read           = loongarch_pmu_read,
> > +};
> > +
> > +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
> > +{
> > +     return (pev->event_id & 0xff);
> > +}
> > +
> > +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
> > +{
> > +     const struct loongarch_perf_event *pev;
> > +
> > +     pev = &(*loongarch_pmu.general_event_map)[idx];
> > +
> > +     if (pev->event_id == HW_OP_UNSUPPORTED)
> > +             return ERR_PTR(-ENOENT);
> > +
> > +     return pev;
> > +}
> > +
> > +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
> > +{
> > +     unsigned int cache_type, cache_op, cache_result;
> > +     const struct loongarch_perf_event *pev;
> > +
> > +     cache_type = (config >> 0) & 0xff;
> > +     if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
> > +             return ERR_PTR(-EINVAL);
> > +
> > +     cache_op = (config >> 8) & 0xff;
> > +     if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
> > +             return ERR_PTR(-EINVAL);
> > +
> > +     cache_result = (config >> 16) & 0xff;
> > +     if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
> > +             return ERR_PTR(-EINVAL);
> > +
> > +     pev = &((*loongarch_pmu.cache_event_map)
> > +                                     [cache_type]
> > +                                     [cache_op]
> > +                                     [cache_result]);
> > +
> > +     if (pev->event_id == CACHE_OP_UNSUPPORTED)
> > +             return ERR_PTR(-ENOENT);
> > +
> > +     return pev;
> > +}
> > +
> > +static int validate_group(struct perf_event *event)
> > +{
> > +     struct perf_event *sibling, *leader = event->group_leader;
> > +     struct cpu_hw_events fake_cpuc;
> > +
> > +     memset(&fake_cpuc, 0, sizeof(fake_cpuc));
> > +
> > +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
> > +             return -EINVAL;
> > +
> > +     for_each_sibling_event(sibling, leader) {
> > +             if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
> > +                     return -EINVAL;
> > +     }
> > +
> > +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
> > +             return -EINVAL;
> > +
> > +     return 0;
> > +}
> > +
> > +static void reset_counters(void *arg)
> > +{
> > +     int counters = (int)(long)arg;
> > +
> > +     switch (counters) {
> > +     case 4:
> > +             loongarch_pmu_write_control(3, 0);
> > +             loongarch_pmu.write_counter(3, 0);
> > +             fallthrough;
> > +     case 3:
> > +             loongarch_pmu_write_control(2, 0);
> > +             loongarch_pmu.write_counter(2, 0);
> > +             fallthrough;
> > +     case 2:
> > +             loongarch_pmu_write_control(1, 0);
> > +             loongarch_pmu.write_counter(1, 0);
> > +             fallthrough;
> > +     case 1:
> > +             loongarch_pmu_write_control(0, 0);
> > +             loongarch_pmu.write_counter(0, 0);
> > +     }
> > +}
> > +
> > +static const struct loongarch_perf_event loongson_new_event_map[PERF_COUNT_HW_MAX] = {
> > +     PERF_MAP_ALL_UNSUPPORTED,
> > +     [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
> > +     [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
> > +     [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
> > +     [PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
> > +     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
> > +     [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
> > +};
> > +
> > +static const struct loongarch_perf_event loongson_new_cache_map
> > +                             [PERF_COUNT_HW_CACHE_MAX]
> > +                             [PERF_COUNT_HW_CACHE_OP_MAX]
> > +                             [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
> > +PERF_CACHE_MAP_ALL_UNSUPPORTED,
> > +[C(L1D)] = {
> > +     /*
> > +      * Like some other architectures (e.g. ARM), the performance
> > +      * counters don't differentiate between read and write
> > +      * accesses/misses, so this isn't strictly correct, but it's the
> > +      * best we can do. Writes and reads get combined.
> > +      */
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_ACCESS)]      = { 0x8 },
> > +             [C(RESULT_MISS)]        = { 0x9 },
> > +     },
> > +     [C(OP_WRITE)] = {
> > +             [C(RESULT_ACCESS)]      = { 0x8 },
> > +             [C(RESULT_MISS)]        = { 0x9 },
> > +     },
> > +     [C(OP_PREFETCH)] = {
> > +             [C(RESULT_ACCESS)]      = { 0xaa },
> > +             [C(RESULT_MISS)]        = { 0xa9 },
> > +     },
> > +},
> > +[C(L1I)] = {
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_ACCESS)]      = { 0x6 },
> > +             [C(RESULT_MISS)]        = { 0x7 },
> > +     },
> > +},
> > +[C(LL)] = {
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_ACCESS)]      = { 0xc },
> > +             [C(RESULT_MISS)]        = { 0xd },
> > +     },
> > +     [C(OP_WRITE)] = {
> > +             [C(RESULT_ACCESS)]      = { 0xc },
> > +             [C(RESULT_MISS)]        = { 0xd },
> > +     },
> > +},
> > +[C(ITLB)] = {
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_MISS)]    = { 0x3b },
> > +     },
> > +},
> > +[C(DTLB)] = {
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_ACCESS)]      = { 0x4 },
> > +             [C(RESULT_MISS)]        = { 0x3c },
> > +     },
> > +     [C(OP_WRITE)] = {
> > +             [C(RESULT_ACCESS)]      = { 0x4 },
> > +             [C(RESULT_MISS)]        = { 0x3c },
> > +     },
> > +},
> > +[C(BPU)] = {
> > +     /* Using the same code for *HW_BRANCH* */
> > +     [C(OP_READ)] = {
> > +             [C(RESULT_ACCESS)]  = { 0x02 },
> > +             [C(RESULT_MISS)]    = { 0x03 },
> > +     },
> > +},
> > +};
> > +
> > +static int __hw_perf_event_init(struct perf_event *event)
> > +{
> > +     struct perf_event_attr *attr = &event->attr;
> > +     struct hw_perf_event *hwc = &event->hw;
> > +     const struct loongarch_perf_event *pev;
> > +     int err;
> > +
> > +     /* Returning LoongArch event descriptor for generic perf event. */
> > +     if (PERF_TYPE_HARDWARE == event->attr.type) {
> > +             if (event->attr.config >= PERF_COUNT_HW_MAX)
> > +                     return -EINVAL;
> > +             pev = loongarch_pmu_map_general_event(event->attr.config);
> > +     } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
> > +             pev = loongarch_pmu_map_cache_event(event->attr.config);
> > +     } else if (PERF_TYPE_RAW == event->attr.type) {
> > +             /* We are working on the global raw event. */
> > +             mutex_lock(&raw_event_mutex);
> > +             pev = loongarch_pmu.map_raw_event(event->attr.config);
> > +     } else {
> > +             /* The event type is not (yet) supported. */
> > +             return -EOPNOTSUPP;
> > +     }
> > +
> > +     if (IS_ERR(pev)) {
> > +             if (PERF_TYPE_RAW == event->attr.type)
> > +                     mutex_unlock(&raw_event_mutex);
> > +             return PTR_ERR(pev);
> > +     }
> > +
> > +     /*
> > +      * We allow max flexibility on how each individual counter shared
> > +      * by the single CPU operates (the mode exclusion and the range).
> > +      */
> > +     hwc->config_base = CSR_PERFCTRL_IE;
> > +
> > +     hwc->event_base = loongarch_pmu_perf_event_encode(pev);
> > +     if (PERF_TYPE_RAW == event->attr.type)
> > +             mutex_unlock(&raw_event_mutex);
> > +
> > +     if (!attr->exclude_user) {
> > +             hwc->config_base |= CSR_PERFCTRL_PLV3;
> > +             hwc->config_base |= CSR_PERFCTRL_PLV2;
> > +     }
> > +     if (!attr->exclude_kernel) {
> > +             hwc->config_base |= CSR_PERFCTRL_PLV0;
> > +     }
> > +     if (!attr->exclude_hv) {
> > +             hwc->config_base |= CSR_PERFCTRL_PLV1;
> > +     }
> > +
> > +     hwc->config_base &= M_PERFCTL_CONFIG_MASK;
> > +     /*
> > +      * The event can belong to another cpu. We do not assign a local
> > +      * counter for it for now.
> > +      */
> > +     hwc->idx = -1;
> > +     hwc->config = 0;
> > +
> > +     if (!hwc->sample_period) {
> > +             hwc->sample_period  = loongarch_pmu.max_period;
> > +             hwc->last_period    = hwc->sample_period;
> > +             local64_set(&hwc->period_left, hwc->sample_period);
> > +     }
> > +
> > +     err = 0;
> > +     if (event->group_leader != event)
> > +             err = validate_group(event);
> > +
> > +     event->destroy = hw_perf_event_destroy;
> > +
> > +     if (err)
> > +             event->destroy(event);
> > +
> > +     return err;
> > +}
> > +
> > +static void pause_local_counters(void)
> > +{
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     int ctr = loongarch_pmu.num_counters;
> > +     unsigned long flags;
> > +
> > +     local_irq_save(flags);
> > +     do {
> > +             ctr--;
> > +             cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
> > +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
> > +                                      ~M_PERFCTL_COUNT_EVENT_WHENEVER);
> > +     } while (ctr > 0);
> > +     local_irq_restore(flags);
> > +}
> > +
> > +static void resume_local_counters(void)
> > +{
> > +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> > +     int ctr = loongarch_pmu.num_counters;
> > +
> > +     do {
> > +             ctr--;
> > +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
> > +     } while (ctr > 0);
> > +}
> > +
> > +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
> > +{
> > +     raw_event.event_id = config & 0xff;
> > +
> > +     return &raw_event;
> > +}
> > +
> > +static int __init
> > +init_hw_perf_events(void)
> > +{
> > +     int counters = 4;
> > +
> > +     if (!cpu_has_pmp)
> > +             return -ENODEV;
> > +
> > +     pr_info("Performance counters: ");
> > +
> > +     loongarch_pmu.num_counters = counters;
> > +     loongarch_pmu.max_period = (1ULL << 63) - 1;
> > +     loongarch_pmu.valid_count = (1ULL << 63) - 1;
> > +     loongarch_pmu.overflow = 1ULL << 63;
> > +     loongarch_pmu.name = "loongarch/loongson64";
> > +     loongarch_pmu.read_counter = loongarch_pmu_read_counter;
> > +     loongarch_pmu.write_counter = loongarch_pmu_write_counter;
> > +     loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
> > +     loongarch_pmu.general_event_map = &loongson_new_event_map;
> > +     loongarch_pmu.cache_event_map = &loongson_new_cache_map;
> > +
> > +     on_each_cpu(reset_counters, (void *)(long)counters, 1);
> > +
> > +     pr_cont("%s PMU enabled, %d %d-bit counters available to each "
> > +             "CPU.\n", loongarch_pmu.name, counters, 64);
> > +
> > +     perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
> > +
> > +     return 0;
> > +}
> > +early_initcall(init_hw_perf_events);
> > diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
> > new file mode 100644
> > index 000000000000..a5e9768e8414
> > --- /dev/null
> > +++ b/arch/loongarch/kernel/perf_regs.c
> > @@ -0,0 +1,50 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Copyright (C) 2022 Loongson Technology Corporation Limited
>
> And this file, too.
OK, thanks.

Huacai
>
> > + */
> > +
> > +#include <linux/perf_event.h>
> > +
> > +#include <asm/ptrace.h>
> > +
> > +#ifdef CONFIG_32BIT
> > +u64 perf_reg_abi(struct task_struct *tsk)
> > +{
> > +     return PERF_SAMPLE_REGS_ABI_32;
> > +}
> > +#else /* Must be CONFIG_64BIT */
> > +u64 perf_reg_abi(struct task_struct *tsk)
> > +{
> > +     if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
> > +             return PERF_SAMPLE_REGS_ABI_32;
> > +     else
> > +             return PERF_SAMPLE_REGS_ABI_64;
> > +}
> > +#endif /* CONFIG_32BIT */
> > +
> > +int perf_reg_validate(u64 mask)
> > +{
> > +     if (!mask)
> > +             return -EINVAL;
> > +     if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
> > +             return -EINVAL;
> > +     return 0;
> > +}
> > +
> > +u64 perf_reg_value(struct pt_regs *regs, int idx)
> > +{
> > +     if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
> > +             return 0;
> > +
> > +     if ((u32)idx == PERF_REG_LOONGARCH_PC)
> > +             return regs->csr_era;
> > +
> > +     return regs->regs[idx];
> > +}
> > +
> > +void perf_get_regs_user(struct perf_regs *regs_user,
> > +                     struct pt_regs *regs)
> > +{
> > +     regs_user->regs = task_pt_regs(current);
> > +     regs_user->abi = perf_reg_abi(current);
> > +}
>
> --
> WANG "xen0n" Xuerui
>
> Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
>


* Re: [PATCH] LoongArch: Add perf events support
  2022-08-15 12:47 [PATCH] LoongArch: Add perf events support Huacai Chen
  2022-08-16  5:46 ` WANG Xuerui
@ 2022-08-16  8:59 ` kernel test robot
  1 sibling, 0 replies; 7+ messages in thread
From: kernel test robot @ 2022-08-16  8:59 UTC (permalink / raw)
  To: Huacai Chen, Arnd Bergmann, Huacai Chen, Peter Zijlstra,
	Ingo Molnar, Arnaldo Carvalho de Melo, Mark Rutland,
	Alexander Shishkin, Jiri Olsa, Namhyung Kim
  Cc: kbuild-all, loongarch, linux-arch, Xuefeng Li, Guo Ren,
	Xuerui Wang, Jiaxun Yang, linux-perf-users, linux-kernel

Hi Huacai,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.0-rc1 next-20220816]
[cannot apply to soc/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Huacai-Chen/LoongArch-Add-perf-events-support/20220815-204852
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 568035b01cfb107af8d2e4bd2fb9aea22cf5b868
config: loongarch-randconfig-s051-20220815 (https://download.01.org/0day-ci/archive/20220816/202208161648.zq48ilEV-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.4-39-gce1a6720-dirty
        # https://github.com/intel-lab-lkp/linux/commit/0e6d9490ff3f6129799675b9288135022a0908e2
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Huacai-Chen/LoongArch-Add-perf-events-support/20220815-204852
        git checkout 0e6d9490ff3f6129799675b9288135022a0908e2
        # save the config file
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=loongarch 

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

sparse warnings: (new ones prefixed by >>)
>> arch/loongarch/kernel/perf_event.c:30:50: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected unsigned long [noderef] __user *user_frame_tail @@     got unsigned long * @@
   arch/loongarch/kernel/perf_event.c:30:50: sparse:     expected unsigned long [noderef] __user *user_frame_tail
   arch/loongarch/kernel/perf_event.c:30:50: sparse:     got unsigned long *
   arch/loongarch/kernel/perf_event.c: note: in included file (through arch/loongarch/include/asm/cpu-info.h, arch/loongarch/include/asm/processor.h, ...):
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: undefined identifier '__builtin_loongarch_csrrd_d'
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: undefined identifier '__builtin_loongarch_csrwr_d'
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:237:16: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type
   arch/loongarch/include/asm/loongarch.h:247:9: sparse: sparse: cast from unknown type

vim +30 arch/loongarch/kernel/perf_event.c

    20	
    21	/*
    22	 * Get the return address for a single stackframe and return a pointer to the
    23	 * next frame tail.
    24	 */
    25	static unsigned long
    26	user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
    27	{
    28		struct stack_frame buftail;
    29		unsigned long err;
  > 30		unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
    31	
    32		/* Also check accessibility of one struct frame_tail beyond */
    33		if (!access_ok(user_frame_tail, sizeof(buftail)))
    34			return 0;
    35	
    36		pagefault_disable();
    37		err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
    38		pagefault_enable();
    39	
    40		if (err || (unsigned long)user_frame_tail >= buftail.fp)
    41			return 0;
    42	
    43		perf_callchain_store(entry, buftail.ra);
    44	
    45		return buftail.fp;
    46	}
    47	

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp


* Re: [PATCH] LoongArch: Add perf events support
  2022-08-16  8:18   ` Huacai Chen
@ 2022-08-16 10:07     ` Qi Hu
  2022-08-16 10:24       ` Huacai Chen
  0 siblings, 1 reply; 7+ messages in thread
From: Qi Hu @ 2022-08-16 10:07 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui
  Cc: Huacai Chen, Arnd Bergmann, Peter Zijlstra, Ingo Molnar,
	Arnaldo Carvalho de Melo, Mark Rutland, Alexander Shishkin,
	Jiri Olsa, Namhyung Kim, loongarch, linux-arch, Xuefeng Li,
	Guo Ren, Jiaxun Yang, linux-perf-users, LKML


On 2022/8/16 16:18, Huacai Chen wrote:
>   Hi, Xuerui,
>
> On Tue, Aug 16, 2022 at 1:46 PM WANG Xuerui <kernel@xen0n.name> wrote:
>> On 2022/8/15 20:47, Huacai Chen wrote:
>>> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
>>> ---
>>>    arch/loongarch/Kconfig                      |   2 +
>>>    arch/loongarch/include/uapi/asm/perf_regs.h |  40 +
>>>    arch/loongarch/kernel/Makefile              |   2 +
>>>    arch/loongarch/kernel/perf_event.c          | 909 ++++++++++++++++++++
>>>    arch/loongarch/kernel/perf_regs.c           |  50 ++
>>>    5 files changed, 1003 insertions(+)
>>>    create mode 100644 arch/loongarch/include/uapi/asm/perf_regs.h
>>>    create mode 100644 arch/loongarch/kernel/perf_event.c
>>>    create mode 100644 arch/loongarch/kernel/perf_regs.c
>> The code seems mostly ripped from arch/mips/kernel/perf_event_mipsxx.c.
>> I reviewed about half of the code and then suddenly realized I might be
>> looking at MIPS code, given that some of the English strings there seemed
>> way too "natural"...
>>
>> But unfortunately, at least for the 3A5000, whose micro-architecture is
>> largely shared with the MIPS-implementing 3A4000, it seems inevitable to
>> involve some of the more MIPS-looking logic. The 1st-generation LA
>> privileged architecture is way too MIPS-like after all, so if we want
>> any support for the 3A5000 we'd have to include this.
>>
>>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>>> index 24665808cf3d..9478f9646fa5 100644
>>> --- a/arch/loongarch/Kconfig
>>> +++ b/arch/loongarch/Kconfig
>>> @@ -93,6 +93,8 @@ config LOONGARCH
>>>        select HAVE_NMI
>>>        select HAVE_PCI
>>>        select HAVE_PERF_EVENTS
>>> +     select HAVE_PERF_REGS
>>> +     select HAVE_PERF_USER_STACK_DUMP
>>>        select HAVE_REGS_AND_STACK_ACCESS_API
>>>        select HAVE_RSEQ
>>>        select HAVE_SETUP_PER_CPU_AREA if NUMA
>>> diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h
>>> new file mode 100644
>>> index 000000000000..9943d418e01d
>>> --- /dev/null
>>> +++ b/arch/loongarch/include/uapi/asm/perf_regs.h
>>> @@ -0,0 +1,40 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
>>> +#ifndef _ASM_LOONGARCH_PERF_REGS_H
>>> +#define _ASM_LOONGARCH_PERF_REGS_H
>>> +
>>> +enum perf_event_loongarch_regs {
>>> +     PERF_REG_LOONGARCH_PC,
>>> +     PERF_REG_LOONGARCH_R1,
>>> +     PERF_REG_LOONGARCH_R2,
>>> +     PERF_REG_LOONGARCH_R3,
>>> +     PERF_REG_LOONGARCH_R4,
>>> +     PERF_REG_LOONGARCH_R5,
>>> +     PERF_REG_LOONGARCH_R6,
>>> +     PERF_REG_LOONGARCH_R7,
>>> +     PERF_REG_LOONGARCH_R8,
>>> +     PERF_REG_LOONGARCH_R9,
>>> +     PERF_REG_LOONGARCH_R10,
>>> +     PERF_REG_LOONGARCH_R11,
>>> +     PERF_REG_LOONGARCH_R12,
>>> +     PERF_REG_LOONGARCH_R13,
>>> +     PERF_REG_LOONGARCH_R14,
>>> +     PERF_REG_LOONGARCH_R15,
>>> +     PERF_REG_LOONGARCH_R16,
>>> +     PERF_REG_LOONGARCH_R17,
>>> +     PERF_REG_LOONGARCH_R18,
>>> +     PERF_REG_LOONGARCH_R19,
>>> +     PERF_REG_LOONGARCH_R20,
>>> +     PERF_REG_LOONGARCH_R21,
>>> +     PERF_REG_LOONGARCH_R22,
>>> +     PERF_REG_LOONGARCH_R23,
>>> +     PERF_REG_LOONGARCH_R24,
>>> +     PERF_REG_LOONGARCH_R25,
>>> +     PERF_REG_LOONGARCH_R26,
>>> +     PERF_REG_LOONGARCH_R27,
>>> +     PERF_REG_LOONGARCH_R28,
>>> +     PERF_REG_LOONGARCH_R29,
>>> +     PERF_REG_LOONGARCH_R30,
>>> +     PERF_REG_LOONGARCH_R31,
>>> +     PERF_REG_LOONGARCH_MAX = PERF_REG_LOONGARCH_R31 + 1,
>> No need for this "PERF_REG_LOONGARCH_R31 + 1" because it's what happens
>> without the assignment anyway?
> PERF_REG_LOONGARCH_MAX is used in perf_event.c

I think what Xuerui means is that "PERF_REG_LOONGARCH_MAX" alone is
enough, i.e. the "= PERF_REG_LOONGARCH_R31 + 1" assignment can be removed.
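
I.e. (sketch):

	enum perf_event_loongarch_regs {
		PERF_REG_LOONGARCH_PC,
		/* ... PERF_REG_LOONGARCH_R1 through R30 ... */
		PERF_REG_LOONGARCH_R31,
		PERF_REG_LOONGARCH_MAX,	/* implicitly R31 + 1 */
	};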

Qi

>>> +};
>>> +#endif /* _ASM_LOONGARCH_PERF_REGS_H */
>>> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
>>> index e5be17009fe8..a213e994db68 100644
>>> --- a/arch/loongarch/kernel/Makefile
>>> +++ b/arch/loongarch/kernel/Makefile
>>> @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)          += numa.o
>>>    obj-$(CONFIG_UNWINDER_GUESS)        += unwind_guess.o
>>>    obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>>>
>>> +obj-$(CONFIG_PERF_EVENTS)    += perf_event.o perf_regs.o
>>> +
>>>    CPPFLAGS_vmlinux.lds                := $(KBUILD_CFLAGS)
>>> diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
>>> new file mode 100644
>>> index 000000000000..00cdbcebaf80
>>> --- /dev/null
>>> +++ b/arch/loongarch/kernel/perf_event.c
>>> @@ -0,0 +1,909 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +/*
>>> + * Linux performance counter support for LoongArch.
>> Please indicate its MIPS origin and copyright info ;-)
> OK, thanks.
>
>>> + *
>>> + * Copyright (C) 2022 Loongson Technology Corporation Limited
>>> + */
>>> +
>>> +#include <linux/cpumask.h>
>>> +#include <linux/interrupt.h>
>>> +#include <linux/smp.h>
>>> +#include <linux/kernel.h>
>>> +#include <linux/perf_event.h>
>>> +#include <linux/uaccess.h>
>>> +#include <linux/sched/task_stack.h>
>>> +
>>> +#include <asm/irq.h>
>>> +#include <asm/irq_regs.h>
>>> +#include <asm/stacktrace.h>
>>> +#include <asm/unwind.h>
>>> +
>>> +/*
>>> + * Get the return address for a single stackframe and return a pointer to the
>>> + * next frame tail.
>>> + */
>>> +static unsigned long
>>> +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
>>> +{
>>> +     struct stack_frame buftail;
>>> +     unsigned long err;
>>> +     unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
>>> +
>>> +     /* Also check accessibility of one struct frame_tail beyond */
>>> +     if (!access_ok(user_frame_tail, sizeof(buftail)))
>>> +             return 0;
>>> +
>>> +     pagefault_disable();
>>> +     err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
>>> +     pagefault_enable();
>>> +
>>> +     if (err || (unsigned long)user_frame_tail >= buftail.fp)
>>> +             return 0;
>>> +
>>> +     perf_callchain_store(entry, buftail.ra);
>>> +
>>> +     return buftail.fp;
>>> +}
>>> +
>>> +void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
>>> +                      struct pt_regs *regs)
>>> +{
>>> +     unsigned long fp;
>>> +
>>> +     if (perf_guest_state()) {
>>> +             /* We don't support guest os callchain now */
>>> +             return;
>>> +     }
>>> +
>>> +     perf_callchain_store(entry, regs->csr_era);
>>> +
>>> +     fp = regs->regs[22];
>>> +
>>> +     while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
>>> +             fp = user_backtrace(entry, fp);
>>> +}
>>> +
>>> +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
>>> +                        struct pt_regs *regs)
>>> +{
>>> +     struct unwind_state state;
>>> +     unsigned long addr;
>>> +
>>> +     for (unwind_start(&state, current, regs);
>>> +           !unwind_done(&state); unwind_next_frame(&state)) {
>>> +             addr = unwind_get_return_address(&state);
>>> +             if (!addr || perf_callchain_store(entry, addr))
>>> +                     return;
>>> +     }
>>> +}
>>> +
>>> +#define LOONGARCH_MAX_HWEVENTS 4
>>> +
>>> +struct cpu_hw_events {
>>> +     /* Array of events on this cpu. */
>>> +     struct perf_event       *events[LOONGARCH_MAX_HWEVENTS];
>>> +
>>> +     /*
>>> +      * Set the bit (indexed by the counter number) when the counter
>>> +      * is used for an event.
>>> +      */
>>> +     unsigned long           used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
>>> +
>>> +     /*
>>> +      * Software copy of the control register for each performance counter.
>>> +      * LoongArch CPUs vary in performance counters. They use this differently,
>>> +      * and even may not use it.
>> I can't easily make sense of the paragraph. "Software copy" could mean
>> "Saved copy", but what do "use differently" and "even may not use it"
>> mean? For the latter I can't tell whether it was originally "some even
>> may not exist" in someone's head, and for the former I can't imagine
>> what the possible cases are or why we would care.
>>
>> Maybe explain a little bit more?
> I think we should remove those useless lines.
>
>>> +      */
>>> +     unsigned int            saved_ctrl[LOONGARCH_MAX_HWEVENTS];
>>> +};
>>> +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
>>> +     .saved_ctrl = {0},
>>> +};
>>> +
>>> +/* The description of LoongArch performance events. */
>>> +struct loongarch_perf_event {
>>> +     unsigned int event_id;
>>> +};
>>> +
>>> +static struct loongarch_perf_event raw_event;
>>> +static DEFINE_MUTEX(raw_event_mutex);
>>> +
>>> +#define C(x) PERF_COUNT_HW_CACHE_##x
>>> +#define HW_OP_UNSUPPORTED            0xffffffff
>>> +#define CACHE_OP_UNSUPPORTED         0xffffffff
>>> +
>>> +#define PERF_MAP_ALL_UNSUPPORTED                                     \
>>> +     [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
>>> +
>>> +#define PERF_CACHE_MAP_ALL_UNSUPPORTED                                       \
>>> +[0 ... C(MAX) - 1] = {                                                       \
>>> +     [0 ... C(OP_MAX) - 1] = {                                       \
>>> +             [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},     \
>>> +     },                                                              \
>>> +}
>>> +
>>> +struct loongarch_pmu {
>>> +     u64             max_period;
>>> +     u64             valid_count;
>>> +     u64             overflow;
>>> +     const char      *name;
>>> +     u64             (*read_counter)(unsigned int idx);
>>> +     void            (*write_counter)(unsigned int idx, u64 val);
>>> +     const struct loongarch_perf_event *(*map_raw_event)(u64 config);
>>> +     const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
>>> +     const struct loongarch_perf_event (*cache_event_map)
>>> +                             [PERF_COUNT_HW_CACHE_MAX]
>>> +                             [PERF_COUNT_HW_CACHE_OP_MAX]
>>> +                             [PERF_COUNT_HW_CACHE_RESULT_MAX];
>> Apparently general_event_map and cache_event_map are not function
>> pointers? So the parens around the field name should be removed.
> They are not function pointers but pointers to arrays. The parentheses
> are needed: dropping them would turn these members into arrays of
> pointers, which is not what we want.
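> For illustration only (not from the patch), compare the two declarations:
>
>     /* pointer to an array of PERF_COUNT_HW_MAX events (what we have) */
>     const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
>
>     /* array of PERF_COUNT_HW_MAX pointers to events (what dropping
>        the parens would give) */
>     const struct loongarch_perf_event *general_event_map[PERF_COUNT_HW_MAX];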
>
>>> +     unsigned int    num_counters;
>>> +};
>>> +
>>> +static struct loongarch_pmu loongarch_pmu;
>>> +
>>> +#define M_PERFCTL_EVENT(event)       (event & CSR_PERFCTRL_EVENT)
>>> +
>>> +#define M_PERFCTL_COUNT_EVENT_WHENEVER       (CSR_PERFCTRL_PLV0 |    \
>>> +                                     CSR_PERFCTRL_PLV1 |     \
>>> +                                     CSR_PERFCTRL_PLV2 |     \
>>> +                                     CSR_PERFCTRL_PLV3 |     \
>>> +                                     CSR_PERFCTRL_IE)
>>> +
>>> +#define M_PERFCTL_CONFIG_MASK                0x1f0000
>>> +
>>> +#define CNTR_BIT_MASK(n)     (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
>> Isn't this just GENMASK(n - 1, 0)?
> Yes, you are right.
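> Will use the 64-bit variant so that n == 64 still works, i.e. something
> like:
>
>     #define CNTR_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)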
>
>>> +
>>> +static void resume_local_counters(void);
>>> +static void pause_local_counters(void);
>>> +
>>> +static u64 loongarch_pmu_read_counter(unsigned int idx)
>>> +{
>>> +     u64 val = -1;
>>> +
>>> +     switch (idx) {
>>> +     case 0:
>>> +             val = read_csr_perfcntr0();
>>> +             break;
>>> +     case 1:
>>> +             val = read_csr_perfcntr1();
>>> +             break;
>>> +     case 2:
>>> +             val = read_csr_perfcntr2();
>>> +             break;
>>> +     case 3:
>>> +             val = read_csr_perfcntr3();
>>> +             break;
>>> +     default:
>>> +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
>>> +             return 0;
>>> +     }
>>> +
>>> +     return val;
>>> +}
>>> +
>>> +static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
>>> +{
>>> +     switch (idx) {
>>> +     case 0:
>>> +             write_csr_perfcntr0(val);
>>> +             return;
>>> +     case 1:
>>> +             write_csr_perfcntr1(val);
>>> +             return;
>>> +     case 2:
>>> +             write_csr_perfcntr2(val);
>>> +             return;
>>> +     case 3:
>>> +             write_csr_perfcntr3(val);
>>> +             return;
>> Want a default branch for this function, similar to the read case?
> Yes, that is needed.
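> Will add one in v2, mirroring the read side, e.g.:
>
>     default:
>             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
>             return;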
>
>>> +     }
>>> +}
>>> +
>>> +static unsigned int loongarch_pmu_read_control(unsigned int idx)
>>> +{
>>> +     unsigned int val = -1;
>>> +
>>> +     switch (idx) {
>>> +     case 0:
>>> +             val = read_csr_perfctrl0();
>>> +             break;
>>> +     case 1:
>>> +             val = read_csr_perfctrl1();
>>> +             break;
>>> +     case 2:
>>> +             val = read_csr_perfctrl2();
>>> +             break;
>>> +     case 3:
>>> +             val = read_csr_perfctrl3();
>>> +             break;
>>> +     default:
>>> +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
>>> +             return 0;
>>> +     }
>>> +
>>> +     return val;
>>> +}
>>> +
>>> +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
>>> +{
>>> +     switch (idx) {
>>> +     case 0:
>>> +             write_csr_perfctrl0(val);
>>> +             return;
>>> +     case 1:
>>> +             write_csr_perfctrl1(val);
>>> +             return;
>>> +     case 2:
>>> +             write_csr_perfctrl2(val);
>>> +             return;
>>> +     case 3:
>>> +             write_csr_perfctrl3(val);
>>> +             return;
>> Similarly here.
>>
>>> +     }
>>> +}
>>> +
>>> +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc,
>>> +                                 struct hw_perf_event *hwc)
>>> +{
>>> +     int i;
>>> +
>>> +     for (i = loongarch_pmu.num_counters - 1; i >= 0; i--) {
>>> +             if (!test_and_set_bit(i, cpuc->used_mask))
>>> +                     return i;
>>> +     }
>>> +
>>> +     return -EAGAIN;
>>> +}
>>> +
>>> +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
>>> +{
>>> +     struct perf_event *event = container_of(evt, struct perf_event, hw);
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     unsigned int cpu;
>>> +
>>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
>>> +
>>> +     cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
>>> +             (evt->config_base & M_PERFCTL_CONFIG_MASK) |
>>> +             /* Make sure interrupt enabled. */
>>> +             CSR_PERFCTRL_IE;
>>> +
>>> +     cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
>>> +
>>> +     pr_debug("Enabling perf counter for CPU%d\n", cpu);
>>> +     /*
>>> +      * We do not actually let the counter run. Leave it until start().
>>> +      */
>>> +}
>>> +
>>> +static void loongarch_pmu_disable_event(int idx)
>>> +{
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     unsigned long flags;
>>> +
>>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
>>> +
>>> +     local_irq_save(flags);
>>> +     cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
>>> +             ~M_PERFCTL_COUNT_EVENT_WHENEVER;
>>> +     loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
>>> +     local_irq_restore(flags);
>>> +}
>>> +
>>> +static int loongarch_pmu_event_set_period(struct perf_event *event,
>>> +                                 struct hw_perf_event *hwc,
>>> +                                 int idx)
>>> +{
>>> +     u64 left = local64_read(&hwc->period_left);
>>> +     u64 period = hwc->sample_period;
>>> +     int ret = 0;
>>> +
>>> +     if (unlikely((left + period) & (1ULL << 63))) {
>>> +             /* left underflowed by more than period. */
>>> +             left = period;
>>> +             local64_set(&hwc->period_left, left);
>>> +             hwc->last_period = period;
>>> +             ret = 1;
>>> +     } else  if (unlikely((left + period) <= period)) {
>>> +             /* left underflowed by less than period. */
>>> +             left += period;
>>> +             local64_set(&hwc->period_left, left);
>>> +             hwc->last_period = period;
>>> +             ret = 1;
>>> +     }
>>> +
>>> +     if (left > loongarch_pmu.max_period) {
>>> +             left = loongarch_pmu.max_period;
>>> +             local64_set(&hwc->period_left, left);
>>> +     }
>>> +
>>> +     local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
>>> +
>>> +     loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
>>> +
>>> +     perf_event_update_userpage(event);
>>> +
>>> +     return ret;
>>> +}
>>> +
>>> +static void loongarch_pmu_event_update(struct perf_event *event,
>>> +                              struct hw_perf_event *hwc,
>>> +                              int idx)
>>> +{
>>> +     u64 delta;
>>> +     u64 prev_raw_count, new_raw_count;
>>> +
>>> +again:
>>> +     prev_raw_count = local64_read(&hwc->prev_count);
>>> +     new_raw_count = loongarch_pmu.read_counter(idx);
>>> +
>>> +     if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
>>> +                             new_raw_count) != prev_raw_count)
>>> +             goto again;
>>> +
>>> +     delta = new_raw_count - prev_raw_count;
>>> +
>>> +     local64_add(delta, &event->count);
>>> +     local64_sub(delta, &hwc->period_left);
>>> +}
>>> +
>>> +static void loongarch_pmu_start(struct perf_event *event, int flags)
>>> +{
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +
>>> +     if (flags & PERF_EF_RELOAD)
>>> +             WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
>>> +
>>> +     hwc->state = 0;
>>> +
>>> +     /* Set the period for the event. */
>>> +     loongarch_pmu_event_set_period(event, hwc, hwc->idx);
>>> +
>>> +     /* Enable the event. */
>>> +     loongarch_pmu_enable_event(hwc, hwc->idx);
>>> +}
>>> +
>>> +static void loongarch_pmu_stop(struct perf_event *event, int flags)
>>> +{
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +
>>> +     if (!(hwc->state & PERF_HES_STOPPED)) {
>>> +             /* We are working on a local event. */
>>> +             loongarch_pmu_disable_event(hwc->idx);
>>> +             barrier();
>>> +             loongarch_pmu_event_update(event, hwc, hwc->idx);
>>> +             hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
>>> +     }
>>> +}
>>> +
>>> +static int loongarch_pmu_add(struct perf_event *event, int flags)
>>> +{
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +     int idx;
>>> +     int err = 0;
>>> +
>>> +     perf_pmu_disable(event->pmu);
>>> +
>>> +     /* To look for a free counter for this event. */
>>> +     idx = loongarch_pmu_alloc_counter(cpuc, hwc);
>>> +     if (idx < 0) {
>>> +             err = idx;
>>> +             goto out;
>>> +     }
>>> +
>>> +     /*
>>> +      * If there is an event in the counter we are going to use then
>>> +      * make sure it is disabled.
>>> +      */
>>> +     event->hw.idx = idx;
>>> +     loongarch_pmu_disable_event(idx);
>>> +     cpuc->events[idx] = event;
>>> +
>>> +     hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
>>> +     if (flags & PERF_EF_START)
>>> +             loongarch_pmu_start(event, PERF_EF_RELOAD);
>>> +
>>> +     /* Propagate our changes to the userspace mapping. */
>>> +     perf_event_update_userpage(event);
>>> +
>>> +out:
>>> +     perf_pmu_enable(event->pmu);
>>> +     return err;
>>> +}
>>> +
>>> +static void loongarch_pmu_del(struct perf_event *event, int flags)
>>> +{
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +     int idx = hwc->idx;
>>> +
>>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
>>> +
>>> +     loongarch_pmu_stop(event, PERF_EF_UPDATE);
>>> +     cpuc->events[idx] = NULL;
>>> +     clear_bit(idx, cpuc->used_mask);
>>> +
>>> +     perf_event_update_userpage(event);
>>> +}
>>> +
>>> +static void loongarch_pmu_read(struct perf_event *event)
>>> +{
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +
>>> +     /* Don't read disabled counters! */
>>> +     if (hwc->idx < 0)
>>> +             return;
>>> +
>>> +     loongarch_pmu_event_update(event, hwc, hwc->idx);
>>> +}
>>> +
>>> +static void loongarch_pmu_enable(struct pmu *pmu)
>>> +{
>>> +     resume_local_counters();
>>> +}
>>> +
>>> +static void loongarch_pmu_disable(struct pmu *pmu)
>>> +{
>>> +     pause_local_counters();
>>> +}
>>> +
>>> +static atomic_t active_events = ATOMIC_INIT(0);
>>> +static DEFINE_MUTEX(pmu_reserve_mutex);
>>> +
>>> +static void reset_counters(void *arg);
>>> +static int __hw_perf_event_init(struct perf_event *event);
>>> +
>>> +static void hw_perf_event_destroy(struct perf_event *event)
>>> +{
>>> +     if (atomic_dec_and_mutex_lock(&active_events,
>>> +                             &pmu_reserve_mutex)) {
>>> +             /*
>>> +              * We must not call the destroy function with interrupts
>>> +              * disabled.
>>> +              */
>>> +             on_each_cpu(reset_counters,
>>> +                     (void *)(long)loongarch_pmu.num_counters, 1);
>>> +             mutex_unlock(&pmu_reserve_mutex);
>>> +     }
>>> +}
>>> +
>>> +/* This is needed by specific irq handlers in perf_event_*.c */
>>> +static void handle_associated_event(struct cpu_hw_events *cpuc,
>>> +                                 int idx, struct perf_sample_data *data,
>>> +                                 struct pt_regs *regs)
>>> +{
>>> +     struct perf_event *event = cpuc->events[idx];
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +
>>> +     loongarch_pmu_event_update(event, hwc, idx);
>>> +     data->period = event->hw.last_period;
>>> +     if (!loongarch_pmu_event_set_period(event, hwc, idx))
>>> +             return;
>>> +
>>> +     if (perf_event_overflow(event, data, regs))
>>> +             loongarch_pmu_disable_event(idx);
>>> +}
>>> +
>>> +static irqreturn_t pmu_handle_irq(int irq, void *dev)
>>> +{
>>> +     int handled = IRQ_NONE;
>>> +     unsigned int counters = loongarch_pmu.num_counters;
>>> +     u64 counter;
>>> +     struct pt_regs *regs;
>>> +     struct perf_sample_data data;
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +
>>> +     /*
>>> +      * First we pause the local counters, so that when we are locked
>>> +      * here, the counters are all paused. When it gets locked due to
>>> +      * perf_disable(), the timer interrupt handler will be delayed.
>>> +      *
>>> +      * See also loongarch_pmu_start().
>>> +      */
>>> +     pause_local_counters();
>>> +
>>> +     regs = get_irq_regs();
>>> +
>>> +     perf_sample_data_init(&data, 0, 0);
>>> +
>>> +     switch (counters) {
>>> +#define HANDLE_COUNTER(n)                                            \
>>> +     case n + 1:                                                     \
>>> +             if (test_bit(n, cpuc->used_mask)) {                     \
>>> +                     counter = loongarch_pmu.read_counter(n);        \
>>> +                     if (counter & loongarch_pmu.overflow) {         \
>>> +                             handle_associated_event(cpuc, n, &data, regs); \
>>> +                             handled = IRQ_HANDLED;                  \
>>> +                     }                                               \
>>> +             }
>>> +     HANDLE_COUNTER(3)
>>> +             fallthrough;
>>> +     HANDLE_COUNTER(2)
>>> +             fallthrough;
>>> +     HANDLE_COUNTER(1)
>>> +             fallthrough;
>>> +     HANDLE_COUNTER(0)
>>> +     }
>>> +
>>> +     resume_local_counters();
>>> +
>>> +     /*
>>> +      * Do all the work for the pending perf events. We can do this
>>> +      * in here because the performance counter interrupt is a regular
>>> +      * interrupt, not NMI.
>>> +      */
>>> +     if (handled == IRQ_HANDLED)
>>> +             irq_work_run();
>>> +
>>> +     return handled;
>>> +}
>>> +
>>> +static int get_pmc_irq(void)
>>> +{
>>> +     struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
>>> +
>>> +     if (d)
>>> +             return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
>>> +
>>> +     return -EINVAL;
>>> +}
>>> +
>>> +static int loongarch_pmu_event_init(struct perf_event *event)
>>> +{
>>> +     int r, irq;
>>> +     unsigned long flags;
>>> +
>>> +     /* does not support taken branch sampling */
>>> +     if (has_branch_stack(event))
>>> +             return -EOPNOTSUPP;
>>> +
>>> +     switch (event->attr.type) {
>>> +     case PERF_TYPE_RAW:
>>> +     case PERF_TYPE_HARDWARE:
>>> +     case PERF_TYPE_HW_CACHE:
>>> +             break;
>>> +
>>> +     default:
>>> +             /* Init it to avoid false validate_group */
>>> +             event->hw.event_base = 0xffffffff;
>>> +             return -ENOENT;
>>> +     }
>>> +
>>> +     if (event->cpu >= 0 && !cpu_online(event->cpu))
>>> +             return -ENODEV;
>>> +
>>> +     irq = get_pmc_irq();
>>> +     flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
>>> +     if (!atomic_inc_not_zero(&active_events)) {
>>> +             mutex_lock(&pmu_reserve_mutex);
>>> +             if (atomic_read(&active_events) == 0) {
>>> +                     r = request_irq(irq, pmu_handle_irq,
>>> +                                     flags, "Perf_PMU", &loongarch_pmu);
>>> +                     if (r < 0) {
>>> +                             pr_warn("PMU IRQ request failed\n");
>>> +                             return -ENODEV;
>>> +                     }
>>> +             }
>>> +             atomic_inc(&active_events);
>>> +             mutex_unlock(&pmu_reserve_mutex);
>>> +     }
>>> +
>>> +     return __hw_perf_event_init(event);
>>> +}
>>> +
>>> +static struct pmu pmu = {
>>> +     .pmu_enable     = loongarch_pmu_enable,
>>> +     .pmu_disable    = loongarch_pmu_disable,
>>> +     .event_init     = loongarch_pmu_event_init,
>>> +     .add            = loongarch_pmu_add,
>>> +     .del            = loongarch_pmu_del,
>>> +     .start          = loongarch_pmu_start,
>>> +     .stop           = loongarch_pmu_stop,
>>> +     .read           = loongarch_pmu_read,
>>> +};
>>> +
>>> +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
>>> +{
>>> +     return (pev->event_id & 0xff);
>>> +}
>>> +
>>> +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
>>> +{
>>> +     const struct loongarch_perf_event *pev;
>>> +
>>> +     pev = &(*loongarch_pmu.general_event_map)[idx];
>>> +
>>> +     if (pev->event_id == HW_OP_UNSUPPORTED)
>>> +             return ERR_PTR(-ENOENT);
>>> +
>>> +     return pev;
>>> +}
>>> +
>>> +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
>>> +{
>>> +     unsigned int cache_type, cache_op, cache_result;
>>> +     const struct loongarch_perf_event *pev;
>>> +
>>> +     cache_type = (config >> 0) & 0xff;
>>> +     if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
>>> +             return ERR_PTR(-EINVAL);
>>> +
>>> +     cache_op = (config >> 8) & 0xff;
>>> +     if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
>>> +             return ERR_PTR(-EINVAL);
>>> +
>>> +     cache_result = (config >> 16) & 0xff;
>>> +     if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
>>> +             return ERR_PTR(-EINVAL);
>>> +
>>> +     pev = &((*loongarch_pmu.cache_event_map)
>>> +                                     [cache_type]
>>> +                                     [cache_op]
>>> +                                     [cache_result]);
>>> +
>>> +     if (pev->event_id == CACHE_OP_UNSUPPORTED)
>>> +             return ERR_PTR(-ENOENT);
>>> +
>>> +     return pev;
>>> +}
>>> +
>>> +static int validate_group(struct perf_event *event)
>>> +{
>>> +     struct perf_event *sibling, *leader = event->group_leader;
>>> +     struct cpu_hw_events fake_cpuc;
>>> +
>>> +     memset(&fake_cpuc, 0, sizeof(fake_cpuc));
>>> +
>>> +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
>>> +             return -EINVAL;
>>> +
>>> +     for_each_sibling_event(sibling, leader) {
>>> +             if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
>>> +                     return -EINVAL;
>>> +     }
>>> +
>>> +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
>>> +             return -EINVAL;
>>> +
>>> +     return 0;
>>> +}
>>> +
>>> +static void reset_counters(void *arg)
>>> +{
>>> +     int counters = (int)(long)arg;
>>> +
>>> +     switch (counters) {
>>> +     case 4:
>>> +             loongarch_pmu_write_control(3, 0);
>>> +             loongarch_pmu.write_counter(3, 0);
>>> +             fallthrough;
>>> +     case 3:
>>> +             loongarch_pmu_write_control(2, 0);
>>> +             loongarch_pmu.write_counter(2, 0);
>>> +             fallthrough;
>>> +     case 2:
>>> +             loongarch_pmu_write_control(1, 0);
>>> +             loongarch_pmu.write_counter(1, 0);
>>> +             fallthrough;
>>> +     case 1:
>>> +             loongarch_pmu_write_control(0, 0);
>>> +             loongarch_pmu.write_counter(0, 0);
>>> +     }
>>> +}
>>> +
>>> +static const struct loongarch_perf_event loongson_new_event_map[PERF_COUNT_HW_MAX] = {
>>> +     PERF_MAP_ALL_UNSUPPORTED,
>>> +     [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
>>> +     [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
>>> +     [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
>>> +     [PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
>>> +     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
>>> +     [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
>>> +};
>>> +
>>> +static const struct loongarch_perf_event loongson_new_cache_map
>>> +                             [PERF_COUNT_HW_CACHE_MAX]
>>> +                             [PERF_COUNT_HW_CACHE_OP_MAX]
>>> +                             [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
>>> +PERF_CACHE_MAP_ALL_UNSUPPORTED,
>>> +[C(L1D)] = {
>>> +     /*
>>> +      * Like some other architectures (e.g. ARM), the performance
>>> +      * counters don't differentiate between read and write
>>> +      * accesses/misses, so this isn't strictly correct, but it's the
>>> +      * best we can do. Writes and reads get combined.
>>> +      */
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0x8 },
>>> +             [C(RESULT_MISS)]        = { 0x9 },
>>> +     },
>>> +     [C(OP_WRITE)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0x8 },
>>> +             [C(RESULT_MISS)]        = { 0x9 },
>>> +     },
>>> +     [C(OP_PREFETCH)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0xaa },
>>> +             [C(RESULT_MISS)]        = { 0xa9 },
>>> +     },
>>> +},
>>> +[C(L1I)] = {
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0x6 },
>>> +             [C(RESULT_MISS)]        = { 0x7 },
>>> +     },
>>> +},
>>> +[C(LL)] = {
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0xc },
>>> +             [C(RESULT_MISS)]        = { 0xd },
>>> +     },
>>> +     [C(OP_WRITE)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0xc },
>>> +             [C(RESULT_MISS)]        = { 0xd },
>>> +     },
>>> +},
>>> +[C(ITLB)] = {
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_MISS)]    = { 0x3b },
>>> +     },
>>> +},
>>> +[C(DTLB)] = {
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0x4 },
>>> +             [C(RESULT_MISS)]        = { 0x3c },
>>> +     },
>>> +     [C(OP_WRITE)] = {
>>> +             [C(RESULT_ACCESS)]      = { 0x4 },
>>> +             [C(RESULT_MISS)]        = { 0x3c },
>>> +     },
>>> +},
>>> +[C(BPU)] = {
>>> +     /* Using the same code for *HW_BRANCH* */
>>> +     [C(OP_READ)] = {
>>> +             [C(RESULT_ACCESS)]  = { 0x02 },
>>> +             [C(RESULT_MISS)]    = { 0x03 },
>>> +     },
>>> +},
>>> +};
>>> +
>>> +static int __hw_perf_event_init(struct perf_event *event)
>>> +{
>>> +     struct perf_event_attr *attr = &event->attr;
>>> +     struct hw_perf_event *hwc = &event->hw;
>>> +     const struct loongarch_perf_event *pev;
>>> +     int err;
>>> +
>>> +     /* Returning LoongArch event descriptor for generic perf event. */
>>> +     if (PERF_TYPE_HARDWARE == event->attr.type) {
>>> +             if (event->attr.config >= PERF_COUNT_HW_MAX)
>>> +                     return -EINVAL;
>>> +             pev = loongarch_pmu_map_general_event(event->attr.config);
>>> +     } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
>>> +             pev = loongarch_pmu_map_cache_event(event->attr.config);
>>> +     } else if (PERF_TYPE_RAW == event->attr.type) {
>>> +             /* We are working on the global raw event. */
>>> +             mutex_lock(&raw_event_mutex);
>>> +             pev = loongarch_pmu.map_raw_event(event->attr.config);
>>> +     } else {
>>> +             /* The event type is not (yet) supported. */
>>> +             return -EOPNOTSUPP;
>>> +     }
>>> +
>>> +     if (IS_ERR(pev)) {
>>> +             if (PERF_TYPE_RAW == event->attr.type)
>>> +                     mutex_unlock(&raw_event_mutex);
>>> +             return PTR_ERR(pev);
>>> +     }
>>> +
>>> +     /*
>>> +      * We allow max flexibility on how each individual counter shared
>>> +      * by the single CPU operates (the mode exclusion and the range).
>>> +      */
>>> +     hwc->config_base = CSR_PERFCTRL_IE;
>>> +
>>> +     hwc->event_base = loongarch_pmu_perf_event_encode(pev);
>>> +     if (PERF_TYPE_RAW == event->attr.type)
>>> +             mutex_unlock(&raw_event_mutex);
>>> +
>>> +     if (!attr->exclude_user) {
>>> +             hwc->config_base |= CSR_PERFCTRL_PLV3;
>>> +             hwc->config_base |= CSR_PERFCTRL_PLV2;
>>> +     }
>>> +     if (!attr->exclude_kernel) {
>>> +             hwc->config_base |= CSR_PERFCTRL_PLV0;
>>> +     }
>>> +     if (!attr->exclude_hv) {
>>> +             hwc->config_base |= CSR_PERFCTRL_PLV1;
>>> +     }
>>> +
>>> +     hwc->config_base &= M_PERFCTL_CONFIG_MASK;
>>> +     /*
>>> +      * The event can belong to another cpu. We do not assign a local
>>> +      * counter for it for now.
>>> +      */
>>> +     hwc->idx = -1;
>>> +     hwc->config = 0;
>>> +
>>> +     if (!hwc->sample_period) {
>>> +             hwc->sample_period  = loongarch_pmu.max_period;
>>> +             hwc->last_period    = hwc->sample_period;
>>> +             local64_set(&hwc->period_left, hwc->sample_period);
>>> +     }
>>> +
>>> +     err = 0;
>>> +     if (event->group_leader != event)
>>> +             err = validate_group(event);
>>> +
>>> +     event->destroy = hw_perf_event_destroy;
>>> +
>>> +     if (err)
>>> +             event->destroy(event);
>>> +
>>> +     return err;
>>> +}
>>> +
>>> +static void pause_local_counters(void)
>>> +{
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     int ctr = loongarch_pmu.num_counters;
>>> +     unsigned long flags;
>>> +
>>> +     local_irq_save(flags);
>>> +     do {
>>> +             ctr--;
>>> +             cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
>>> +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
>>> +                                      ~M_PERFCTL_COUNT_EVENT_WHENEVER);
>>> +     } while (ctr > 0);
>>> +     local_irq_restore(flags);
>>> +}
>>> +
>>> +static void resume_local_counters(void)
>>> +{
>>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>> +     int ctr = loongarch_pmu.num_counters;
>>> +
>>> +     do {
>>> +             ctr--;
>>> +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
>>> +     } while (ctr > 0);
>>> +}
>>> +
>>> +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
>>> +{
>>> +     raw_event.event_id = config & 0xff;
>>> +
>>> +     return &raw_event;
>>> +}
>>> +
>>> +static int __init
>>> +init_hw_perf_events(void)
>>> +{
>>> +     int counters = 4;
>>> +
>>> +     if (!cpu_has_pmp)
>>> +             return -ENODEV;
>>> +
>>> +     pr_info("Performance counters: ");
>>> +
>>> +     loongarch_pmu.num_counters = counters;
>>> +     loongarch_pmu.max_period = (1ULL << 63) - 1;
>>> +     loongarch_pmu.valid_count = (1ULL << 63) - 1;
>>> +     loongarch_pmu.overflow = 1ULL << 63;
>>> +     loongarch_pmu.name = "loongarch/loongson64";
>>> +     loongarch_pmu.read_counter = loongarch_pmu_read_counter;
>>> +     loongarch_pmu.write_counter = loongarch_pmu_write_counter;
>>> +     loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
>>> +     loongarch_pmu.general_event_map = &loongson_new_event_map;
>>> +     loongarch_pmu.cache_event_map = &loongson_new_cache_map;
>>> +
>>> +     on_each_cpu(reset_counters, (void *)(long)counters, 1);
>>> +
>>> +     pr_cont("%s PMU enabled, %d %d-bit counters available to each "
>>> +             "CPU.\n", loongarch_pmu.name, counters, 64);
>>> +
>>> +     perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
>>> +
>>> +     return 0;
>>> +}
>>> +early_initcall(init_hw_perf_events);
>>> diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
>>> new file mode 100644
>>> index 000000000000..a5e9768e8414
>>> --- /dev/null
>>> +++ b/arch/loongarch/kernel/perf_regs.c
>>> @@ -0,0 +1,50 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +/*
>>> + * Copyright (C) 2022 Loongson Technology Corporation Limited
>> And this file.
> OK, thanks.
>
> Huacai
>>> + */
>>> +
>>> +#include <linux/perf_event.h>
>>> +
>>> +#include <asm/ptrace.h>
>>> +
>>> +#ifdef CONFIG_32BIT
>>> +u64 perf_reg_abi(struct task_struct *tsk)
>>> +{
>>> +     return PERF_SAMPLE_REGS_ABI_32;
>>> +}
>>> +#else /* Must be CONFIG_64BIT */
>>> +u64 perf_reg_abi(struct task_struct *tsk)
>>> +{
>>> +     if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
>>> +             return PERF_SAMPLE_REGS_ABI_32;
>>> +     else
>>> +             return PERF_SAMPLE_REGS_ABI_64;
>>> +}
>>> +#endif /* CONFIG_32BIT */
>>> +
>>> +int perf_reg_validate(u64 mask)
>>> +{
>>> +     if (!mask)
>>> +             return -EINVAL;
>>> +     if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
>>> +             return -EINVAL;
>>> +     return 0;
>>> +}
>>> +
>>> +u64 perf_reg_value(struct pt_regs *regs, int idx)
>>> +{
>>> +     if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
>>> +             return 0;
>>> +
>>> +     if ((u32)idx == PERF_REG_LOONGARCH_PC)
>>> +             return regs->csr_era;
>>> +
>>> +     return regs->regs[idx];
>>> +}
>>> +
>>> +void perf_get_regs_user(struct perf_regs *regs_user,
>>> +                     struct pt_regs *regs)
>>> +{
>>> +     regs_user->regs = task_pt_regs(current);
>>> +     regs_user->abi = perf_reg_abi(current);
>>> +}
>> --
>> WANG "xen0n" Xuerui
>>
>> Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
>>


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] LoongArch: Add perf events support
  2022-08-16 10:07     ` Qi Hu
@ 2022-08-16 10:24       ` Huacai Chen
  0 siblings, 0 replies; 7+ messages in thread
From: Huacai Chen @ 2022-08-16 10:24 UTC (permalink / raw)
  To: Qi Hu
  Cc: WANG Xuerui, Huacai Chen, Arnd Bergmann, Peter Zijlstra,
	Ingo Molnar, Arnaldo Carvalho de Melo, Mark Rutland,
	Alexander Shishkin, Jiri Olsa, Namhyung Kim, loongarch,
	linux-arch, Xuefeng Li, Guo Ren, Jiaxun Yang, linux-perf-users,
	LKML

On Tue, Aug 16, 2022 at 6:07 PM Qi Hu <huqi@loongson.cn> wrote:
>
>
> On 2022/8/16 16:18, Huacai Chen wrote:
> >   Hi, Xuerui,
> >
> > On Tue, Aug 16, 2022 at 1:46 PM WANG Xuerui <kernel@xen0n.name> wrote:
> >> On 2022/8/15 20:47, Huacai Chen wrote:
> >>> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
> >>> ---
> >>>    arch/loongarch/Kconfig                      |   2 +
> >>>    arch/loongarch/include/uapi/asm/perf_regs.h |  40 +
> >>>    arch/loongarch/kernel/Makefile              |   2 +
> >>>    arch/loongarch/kernel/perf_event.c          | 909 ++++++++++++++++++++
> >>>    arch/loongarch/kernel/perf_regs.c           |  50 ++
> >>>    5 files changed, 1003 insertions(+)
> >>>    create mode 100644 arch/loongarch/include/uapi/asm/perf_regs.h
> >>>    create mode 100644 arch/loongarch/kernel/perf_event.c
> >>>    create mode 100644 arch/loongarch/kernel/perf_regs.c
> >> The code seems mostly ripped from arch/mips/kernel/perf_event_mipsxx.c.
> >> I reviewed about half of the code, then suddenly realized I might be
> >> looking at MIPS code, given that some of the English strings there
> >> seemed way too "natural"...
> >>
> >> But unfortunately, at least for the 3A5000, whose micro-architecture is
> >> largely shared with the MIPS-implementing 3A4000, it seems inevitable to
> >> involve some of the more MIPS-looking logic. The 1st-generation LA
> >> privileged architecture is way too MIPS-like after all, so if we want
> >> any support for the 3A5000 we'd have to include this.
> >>
> >>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> >>> index 24665808cf3d..9478f9646fa5 100644
> >>> --- a/arch/loongarch/Kconfig
> >>> +++ b/arch/loongarch/Kconfig
> >>> @@ -93,6 +93,8 @@ config LOONGARCH
> >>>        select HAVE_NMI
> >>>        select HAVE_PCI
> >>>        select HAVE_PERF_EVENTS
> >>> +     select HAVE_PERF_REGS
> >>> +     select HAVE_PERF_USER_STACK_DUMP
> >>>        select HAVE_REGS_AND_STACK_ACCESS_API
> >>>        select HAVE_RSEQ
> >>>        select HAVE_SETUP_PER_CPU_AREA if NUMA
> >>> diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h
> >>> new file mode 100644
> >>> index 000000000000..9943d418e01d
> >>> --- /dev/null
> >>> +++ b/arch/loongarch/include/uapi/asm/perf_regs.h
> >>> @@ -0,0 +1,40 @@
> >>> +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
> >>> +#ifndef _ASM_LOONGARCH_PERF_REGS_H
> >>> +#define _ASM_LOONGARCH_PERF_REGS_H
> >>> +
> >>> +enum perf_event_loongarch_regs {
> >>> +     PERF_REG_LOONGARCH_PC,
> >>> +     PERF_REG_LOONGARCH_R1,
> >>> +     PERF_REG_LOONGARCH_R2,
> >>> +     PERF_REG_LOONGARCH_R3,
> >>> +     PERF_REG_LOONGARCH_R4,
> >>> +     PERF_REG_LOONGARCH_R5,
> >>> +     PERF_REG_LOONGARCH_R6,
> >>> +     PERF_REG_LOONGARCH_R7,
> >>> +     PERF_REG_LOONGARCH_R8,
> >>> +     PERF_REG_LOONGARCH_R9,
> >>> +     PERF_REG_LOONGARCH_R10,
> >>> +     PERF_REG_LOONGARCH_R11,
> >>> +     PERF_REG_LOONGARCH_R12,
> >>> +     PERF_REG_LOONGARCH_R13,
> >>> +     PERF_REG_LOONGARCH_R14,
> >>> +     PERF_REG_LOONGARCH_R15,
> >>> +     PERF_REG_LOONGARCH_R16,
> >>> +     PERF_REG_LOONGARCH_R17,
> >>> +     PERF_REG_LOONGARCH_R18,
> >>> +     PERF_REG_LOONGARCH_R19,
> >>> +     PERF_REG_LOONGARCH_R20,
> >>> +     PERF_REG_LOONGARCH_R21,
> >>> +     PERF_REG_LOONGARCH_R22,
> >>> +     PERF_REG_LOONGARCH_R23,
> >>> +     PERF_REG_LOONGARCH_R24,
> >>> +     PERF_REG_LOONGARCH_R25,
> >>> +     PERF_REG_LOONGARCH_R26,
> >>> +     PERF_REG_LOONGARCH_R27,
> >>> +     PERF_REG_LOONGARCH_R28,
> >>> +     PERF_REG_LOONGARCH_R29,
> >>> +     PERF_REG_LOONGARCH_R30,
> >>> +     PERF_REG_LOONGARCH_R31,
> >>> +     PERF_REG_LOONGARCH_MAX = PERF_REG_LOONGARCH_R31 + 1,
> >> No need for this "PERF_REG_LOONGARCH_R31 + 1" because it's what happens
> >> without the assignment anyway?
> > PERF_REG_LOONGARCH_MAX is used in perf_regs.c
>
> I think Xuerui wants to say "PERF_REG_LOONGARCH_MAX" is enough, and "=
> PERF_REG_LOONGARCH_R31 + 1" can be removed.
Yes, you are right, I'm sorry.
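So in v2 the explicit assignment can simply be dropped, i.e. the tail of
the enum becomes:

	PERF_REG_LOONGARCH_R31,
	PERF_REG_LOONGARCH_MAX,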

Huacai
>
> Qi
>
> >>> +};
> >>> +#endif /* _ASM_LOONGARCH_PERF_REGS_H */
> >>> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> >>> index e5be17009fe8..a213e994db68 100644
> >>> --- a/arch/loongarch/kernel/Makefile
> >>> +++ b/arch/loongarch/kernel/Makefile
> >>> @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA)          += numa.o
> >>>    obj-$(CONFIG_UNWINDER_GUESS)        += unwind_guess.o
> >>>    obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
> >>>
> >>> +obj-$(CONFIG_PERF_EVENTS)    += perf_event.o perf_regs.o
> >>> +
> >>>    CPPFLAGS_vmlinux.lds                := $(KBUILD_CFLAGS)
> >>> diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
> >>> new file mode 100644
> >>> index 000000000000..00cdbcebaf80
> >>> --- /dev/null
> >>> +++ b/arch/loongarch/kernel/perf_event.c
> >>> @@ -0,0 +1,909 @@
> >>> +// SPDX-License-Identifier: GPL-2.0
> >>> +/*
> >>> + * Linux performance counter support for LoongArch.
> >> Please indicate its MIPS origin and copyright info ;-)
> > OK, thanks.
> >
> >>> + *
> >>> + * Copyright (C) 2022 Loongson Technology Corporation Limited
> >>> + */
> >>> +
> >>> +#include <linux/cpumask.h>
> >>> +#include <linux/interrupt.h>
> >>> +#include <linux/smp.h>
> >>> +#include <linux/kernel.h>
> >>> +#include <linux/perf_event.h>
> >>> +#include <linux/uaccess.h>
> >>> +#include <linux/sched/task_stack.h>
> >>> +
> >>> +#include <asm/irq.h>
> >>> +#include <asm/irq_regs.h>
> >>> +#include <asm/stacktrace.h>
> >>> +#include <asm/unwind.h>
> >>> +
> >>> +/*
> >>> + * Get the return address for a single stackframe and return a pointer to the
> >>> + * next frame tail.
> >>> + */
> >>> +static unsigned long
> >>> +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
> >>> +{
> >>> +     struct stack_frame buftail;
> >>> +     unsigned long err;
> >>> +     unsigned long __user *user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
> >>> +
> >>> +     /* Also check accessibility of one struct frame_tail beyond */
> >>> +     if (!access_ok(user_frame_tail, sizeof(buftail)))
> >>> +             return 0;
> >>> +
> >>> +     pagefault_disable();
> >>> +     err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
> >>> +     pagefault_enable();
> >>> +
> >>> +     if (err || (unsigned long)user_frame_tail >= buftail.fp)
> >>> +             return 0;
> >>> +
> >>> +     perf_callchain_store(entry, buftail.ra);
> >>> +
> >>> +     return buftail.fp;
> >>> +}
> >>> +
> >>> +void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
> >>> +                      struct pt_regs *regs)
> >>> +{
> >>> +     unsigned long fp;
> >>> +
> >>> +     if (perf_guest_state()) {
> >>> +             /* We don't support guest os callchain now */
> >>> +             return;
> >>> +     }
> >>> +
> >>> +     perf_callchain_store(entry, regs->csr_era);
> >>> +
> >>> +     fp = regs->regs[22];
> >>> +
> >>> +     while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
> >>> +             fp = user_backtrace(entry, fp);
> >>> +}
> >>> +
> >>> +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
> >>> +                        struct pt_regs *regs)
> >>> +{
> >>> +     struct unwind_state state;
> >>> +     unsigned long addr;
> >>> +
> >>> +     for (unwind_start(&state, current, regs);
> >>> +           !unwind_done(&state); unwind_next_frame(&state)) {
> >>> +             addr = unwind_get_return_address(&state);
> >>> +             if (!addr || perf_callchain_store(entry, addr))
> >>> +                     return;
> >>> +     }
> >>> +}
> >>> +
> >>> +#define LOONGARCH_MAX_HWEVENTS 4
> >>> +
> >>> +struct cpu_hw_events {
> >>> +     /* Array of events on this cpu. */
> >>> +     struct perf_event       *events[LOONGARCH_MAX_HWEVENTS];
> >>> +
> >>> +     /*
> >>> +      * Set the bit (indexed by the counter number) when the counter
> >>> +      * is used for an event.
> >>> +      */
> >>> +     unsigned long           used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
> >>> +
> >>> +     /*
> >>> +      * Software copy of the control register for each performance counter.
> >>> +      * LoongArch CPUs vary in performance counters. They use this differently,
> >>> +      * and even may not use it.
> >> I can't easily make sense of this paragraph. "Software copy" could mean
> >> "saved copy", but what do "use this differently" and "even may not use
> >> it" mean? For the latter I can't tell whether the original intent was
> >> "some counters may not even exist", and for the former I can't imagine
> >> what the possible cases are or why we would care.
> >>
> >> Maybe explain a little bit more?
> > I think we should remove those useless lines.
> >
> >>> +      */
> >>> +     unsigned int            saved_ctrl[LOONGARCH_MAX_HWEVENTS];
> >>> +};
> >>> +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
> >>> +     .saved_ctrl = {0},
> >>> +};
> >>> +
> >>> +/* The description of LoongArch performance events. */
> >>> +struct loongarch_perf_event {
> >>> +     unsigned int event_id;
> >>> +};
> >>> +
> >>> +static struct loongarch_perf_event raw_event;
> >>> +static DEFINE_MUTEX(raw_event_mutex);
> >>> +
> >>> +#define C(x) PERF_COUNT_HW_CACHE_##x
> >>> +#define HW_OP_UNSUPPORTED            0xffffffff
> >>> +#define CACHE_OP_UNSUPPORTED         0xffffffff
> >>> +
> >>> +#define PERF_MAP_ALL_UNSUPPORTED                                     \
> >>> +     [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
> >>> +
> >>> +#define PERF_CACHE_MAP_ALL_UNSUPPORTED                                       \
> >>> +[0 ... C(MAX) - 1] = {                                                       \
> >>> +     [0 ... C(OP_MAX) - 1] = {                                       \
> >>> +             [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED},     \
> >>> +     },                                                              \
> >>> +}
> >>> +
> >>> +struct loongarch_pmu {
> >>> +     u64             max_period;
> >>> +     u64             valid_count;
> >>> +     u64             overflow;
> >>> +     const char      *name;
> >>> +     u64             (*read_counter)(unsigned int idx);
> >>> +     void            (*write_counter)(unsigned int idx, u64 val);
> >>> +     const struct loongarch_perf_event *(*map_raw_event)(u64 config);
> >>> +     const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
> >>> +     const struct loongarch_perf_event (*cache_event_map)
> >>> +                             [PERF_COUNT_HW_CACHE_MAX]
> >>> +                             [PERF_COUNT_HW_CACHE_OP_MAX]
> >>> +                             [PERF_COUNT_HW_CACHE_RESULT_MAX];
> >> Apparently general_event_map and cache_event_map are not function
> >> pointers? So the parens around the field name should be removed.
> > They are not function pointers but pointers to arrays. The parentheses
> > are needed: dropping them would turn these members into arrays of
> > pointers, which is not what we want.
> >
> >>> +     unsigned int    num_counters;
> >>> +};
> >>> +
> >>> +static struct loongarch_pmu loongarch_pmu;
> >>> +
> >>> +#define M_PERFCTL_EVENT(event)       (event & CSR_PERFCTRL_EVENT)
> >>> +
> >>> +#define M_PERFCTL_COUNT_EVENT_WHENEVER       (CSR_PERFCTRL_PLV0 |    \
> >>> +                                     CSR_PERFCTRL_PLV1 |     \
> >>> +                                     CSR_PERFCTRL_PLV2 |     \
> >>> +                                     CSR_PERFCTRL_PLV3 |     \
> >>> +                                     CSR_PERFCTRL_IE)
> >>> +
> >>> +#define M_PERFCTL_CONFIG_MASK                0x1f0000
> >>> +
> >>> +#define CNTR_BIT_MASK(n)     (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
> >> Isn't this just GENMASK(n - 1, 0)?
> > Yes, you are right.
> >
> >>> +
> >>> +static void resume_local_counters(void);
> >>> +static void pause_local_counters(void);
> >>> +
> >>> +static u64 loongarch_pmu_read_counter(unsigned int idx)
> >>> +{
> >>> +     u64 val = -1;
> >>> +
> >>> +     switch (idx) {
> >>> +     case 0:
> >>> +             val = read_csr_perfcntr0();
> >>> +             break;
> >>> +     case 1:
> >>> +             val = read_csr_perfcntr1();
> >>> +             break;
> >>> +     case 2:
> >>> +             val = read_csr_perfcntr2();
> >>> +             break;
> >>> +     case 3:
> >>> +             val = read_csr_perfcntr3();
> >>> +             break;
> >>> +     default:
> >>> +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> >>> +             return 0;
> >>> +     }
> >>> +
> >>> +     return val;
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
> >>> +{
> >>> +     switch (idx) {
> >>> +     case 0:
> >>> +             write_csr_perfcntr0(val);
> >>> +             return;
> >>> +     case 1:
> >>> +             write_csr_perfcntr1(val);
> >>> +             return;
> >>> +     case 2:
> >>> +             write_csr_perfcntr2(val);
> >>> +             return;
> >>> +     case 3:
> >>> +             write_csr_perfcntr3(val);
> >>> +             return;
> >> Want a default branch for this function, similar to the read case?
> > Yes, that is needed.
> >
> >>> +     }
> >>> +}
> >>> +
> >>> +static unsigned int loongarch_pmu_read_control(unsigned int idx)
> >>> +{
> >>> +     unsigned int val = -1;
> >>> +
> >>> +     switch (idx) {
> >>> +     case 0:
> >>> +             val = read_csr_perfctrl0();
> >>> +             break;
> >>> +     case 1:
> >>> +             val = read_csr_perfctrl1();
> >>> +             break;
> >>> +     case 2:
> >>> +             val = read_csr_perfctrl2();
> >>> +             break;
> >>> +     case 3:
> >>> +             val = read_csr_perfctrl3();
> >>> +             break;
> >>> +     default:
> >>> +             WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
> >>> +             return 0;
> >>> +     }
> >>> +
> >>> +     return val;
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
> >>> +{
> >>> +     switch (idx) {
> >>> +     case 0:
> >>> +             write_csr_perfctrl0(val);
> >>> +             return;
> >>> +     case 1:
> >>> +             write_csr_perfctrl1(val);
> >>> +             return;
> >>> +     case 2:
> >>> +             write_csr_perfctrl2(val);
> >>> +             return;
> >>> +     case 3:
> >>> +             write_csr_perfctrl3(val);
> >>> +             return;
> >> Similarly here.
> >>
> >>> +     }
> >>> +}
> >>> +
> >>> +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc,
> >>> +                                 struct hw_perf_event *hwc)
> >>> +{
> >>> +     int i;
> >>> +
> >>> +     for (i = loongarch_pmu.num_counters - 1; i >= 0; i--) {
> >>> +             if (!test_and_set_bit(i, cpuc->used_mask))
> >>> +                     return i;
> >>> +     }
> >>> +
> >>> +     return -EAGAIN;
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
> >>> +{
> >>> +     struct perf_event *event = container_of(evt, struct perf_event, hw);
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     unsigned int cpu;
> >>> +
> >>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> >>> +
> >>> +     cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
> >>> +             (evt->config_base & M_PERFCTL_CONFIG_MASK) |
> >>> +             /* Make sure interrupt enabled. */
> >>> +             CSR_PERFCTRL_IE;
> >>> +
> >>> +     cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
> >>> +
> >>> +     pr_debug("Enabling perf counter for CPU%d\n", cpu);
> >>> +     /*
> >>> +      * We do not actually let the counter run. Leave it until start().
> >>> +      */
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_disable_event(int idx)
> >>> +{
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     unsigned long flags;
> >>> +
> >>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> >>> +
> >>> +     local_irq_save(flags);
> >>> +     cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
> >>> +             ~M_PERFCTL_COUNT_EVENT_WHENEVER;
> >>> +     loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
> >>> +     local_irq_restore(flags);
> >>> +}
> >>> +
> >>> +static int loongarch_pmu_event_set_period(struct perf_event *event,
> >>> +                                 struct hw_perf_event *hwc,
> >>> +                                 int idx)
> >>> +{
> >>> +     u64 left = local64_read(&hwc->period_left);
> >>> +     u64 period = hwc->sample_period;
> >>> +     int ret = 0;
> >>> +
> >>> +     if (unlikely((left + period) & (1ULL << 63))) {
> >>> +             /* left underflowed by more than period. */
> >>> +             left = period;
> >>> +             local64_set(&hwc->period_left, left);
> >>> +             hwc->last_period = period;
> >>> +             ret = 1;
> >>> +     } else  if (unlikely((left + period) <= period)) {
> >>> +             /* left underflowed by less than period. */
> >>> +             left += period;
> >>> +             local64_set(&hwc->period_left, left);
> >>> +             hwc->last_period = period;
> >>> +             ret = 1;
> >>> +     }
> >>> +
> >>> +     if (left > loongarch_pmu.max_period) {
> >>> +             left = loongarch_pmu.max_period;
> >>> +             local64_set(&hwc->period_left, left);
> >>> +     }
> >>> +
> >>> +     local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
> >>> +
> >>> +     loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
> >>> +
> >>> +     perf_event_update_userpage(event);
> >>> +
> >>> +     return ret;
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_event_update(struct perf_event *event,
> >>> +                              struct hw_perf_event *hwc,
> >>> +                              int idx)
> >>> +{
> >>> +     u64 delta;
> >>> +     u64 prev_raw_count, new_raw_count;
> >>> +
> >>> +again:
> >>> +     prev_raw_count = local64_read(&hwc->prev_count);
> >>> +     new_raw_count = loongarch_pmu.read_counter(idx);
> >>> +
> >>> +     if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> >>> +                             new_raw_count) != prev_raw_count)
> >>> +             goto again;
> >>> +
> >>> +     delta = new_raw_count - prev_raw_count;
> >>> +
> >>> +     local64_add(delta, &event->count);
> >>> +     local64_sub(delta, &hwc->period_left);
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_start(struct perf_event *event, int flags)
> >>> +{
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +
> >>> +     if (flags & PERF_EF_RELOAD)
> >>> +             WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
> >>> +
> >>> +     hwc->state = 0;
> >>> +
> >>> +     /* Set the period for the event. */
> >>> +     loongarch_pmu_event_set_period(event, hwc, hwc->idx);
> >>> +
> >>> +     /* Enable the event. */
> >>> +     loongarch_pmu_enable_event(hwc, hwc->idx);
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_stop(struct perf_event *event, int flags)
> >>> +{
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +
> >>> +     if (!(hwc->state & PERF_HES_STOPPED)) {
> >>> +             /* We are working on a local event. */
> >>> +             loongarch_pmu_disable_event(hwc->idx);
> >>> +             barrier();
> >>> +             loongarch_pmu_event_update(event, hwc, hwc->idx);
> >>> +             hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> >>> +     }
> >>> +}
> >>> +
> >>> +static int loongarch_pmu_add(struct perf_event *event, int flags)
> >>> +{
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +     int idx;
> >>> +     int err = 0;
> >>> +
> >>> +     perf_pmu_disable(event->pmu);
> >>> +
> >>> +     /* To look for a free counter for this event. */
> >>> +     idx = loongarch_pmu_alloc_counter(cpuc, hwc);
> >>> +     if (idx < 0) {
> >>> +             err = idx;
> >>> +             goto out;
> >>> +     }
> >>> +
> >>> +     /*
> >>> +      * If there is an event in the counter we are going to use then
> >>> +      * make sure it is disabled.
> >>> +      */
> >>> +     event->hw.idx = idx;
> >>> +     loongarch_pmu_disable_event(idx);
> >>> +     cpuc->events[idx] = event;
> >>> +
> >>> +     hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> >>> +     if (flags & PERF_EF_START)
> >>> +             loongarch_pmu_start(event, PERF_EF_RELOAD);
> >>> +
> >>> +     /* Propagate our changes to the userspace mapping. */
> >>> +     perf_event_update_userpage(event);
> >>> +
> >>> +out:
> >>> +     perf_pmu_enable(event->pmu);
> >>> +     return err;
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_del(struct perf_event *event, int flags)
> >>> +{
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +     int idx = hwc->idx;
> >>> +
> >>> +     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
> >>> +
> >>> +     loongarch_pmu_stop(event, PERF_EF_UPDATE);
> >>> +     cpuc->events[idx] = NULL;
> >>> +     clear_bit(idx, cpuc->used_mask);
> >>> +
> >>> +     perf_event_update_userpage(event);
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_read(struct perf_event *event)
> >>> +{
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +
> >>> +     /* Don't read disabled counters! */
> >>> +     if (hwc->idx < 0)
> >>> +             return;
> >>> +
> >>> +     loongarch_pmu_event_update(event, hwc, hwc->idx);
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_enable(struct pmu *pmu)
> >>> +{
> >>> +     resume_local_counters();
> >>> +}
> >>> +
> >>> +static void loongarch_pmu_disable(struct pmu *pmu)
> >>> +{
> >>> +     pause_local_counters();
> >>> +}
> >>> +
> >>> +static atomic_t active_events = ATOMIC_INIT(0);
> >>> +static DEFINE_MUTEX(pmu_reserve_mutex);
> >>> +
> >>> +static void reset_counters(void *arg);
> >>> +static int __hw_perf_event_init(struct perf_event *event);
> >>> +
> >>> +static void hw_perf_event_destroy(struct perf_event *event)
> >>> +{
> >>> +     if (atomic_dec_and_mutex_lock(&active_events,
> >>> +                             &pmu_reserve_mutex)) {
> >>> +             /*
> >>> +              * We must not call the destroy function with interrupts
> >>> +              * disabled.
> >>> +              */
> >>> +             on_each_cpu(reset_counters,
> >>> +                     (void *)(long)loongarch_pmu.num_counters, 1);
> >>> +             mutex_unlock(&pmu_reserve_mutex);
> >>> +     }
> >>> +}
> >>> +
> >>> +/* This is needed by specific irq handlers in perf_event_*.c */
> >>> +static void handle_associated_event(struct cpu_hw_events *cpuc,
> >>> +                                 int idx, struct perf_sample_data *data,
> >>> +                                 struct pt_regs *regs)
> >>> +{
> >>> +     struct perf_event *event = cpuc->events[idx];
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +
> >>> +     loongarch_pmu_event_update(event, hwc, idx);
> >>> +     data->period = event->hw.last_period;
> >>> +     if (!loongarch_pmu_event_set_period(event, hwc, idx))
> >>> +             return;
> >>> +
> >>> +     if (perf_event_overflow(event, data, regs))
> >>> +             loongarch_pmu_disable_event(idx);
> >>> +}
> >>> +
> >>> +static irqreturn_t pmu_handle_irq(int irq, void *dev)
> >>> +{
> >>> +     int handled = IRQ_NONE;
> >>> +     unsigned int counters = loongarch_pmu.num_counters;
> >>> +     u64 counter;
> >>> +     struct pt_regs *regs;
> >>> +     struct perf_sample_data data;
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +
> >>> +     /*
> >>> +      * First we pause the local counters, so that when we are locked
> >>> +      * here, the counters are all paused. When it gets locked due to
> >>> +      * perf_disable(), the timer interrupt handler will be delayed.
> >>> +      *
> >>> +      * See also loongarch_pmu_start().
> >>> +      */
> >>> +     pause_local_counters();
> >>> +
> >>> +     regs = get_irq_regs();
> >>> +
> >>> +     perf_sample_data_init(&data, 0, 0);
> >>> +
> >>> +     switch (counters) {
> >>> +#define HANDLE_COUNTER(n)                                            \
> >>> +     case n + 1:                                                     \
> >>> +             if (test_bit(n, cpuc->used_mask)) {                     \
> >>> +                     counter = loongarch_pmu.read_counter(n);        \
> >>> +                     if (counter & loongarch_pmu.overflow) {         \
> >>> +                             handle_associated_event(cpuc, n, &data, regs); \
> >>> +                             handled = IRQ_HANDLED;                  \
> >>> +                     }                                               \
> >>> +             }
> >>> +     HANDLE_COUNTER(3)
> >>> +             fallthrough;
> >>> +     HANDLE_COUNTER(2)
> >>> +             fallthrough;
> >>> +     HANDLE_COUNTER(1)
> >>> +             fallthrough;
> >>> +     HANDLE_COUNTER(0)
> >>> +     }
> >>> +
> >>> +     resume_local_counters();
> >>> +
> >>> +     /*
> >>> +      * Do all the work for the pending perf events. We can do this
> >>> +      * in here because the performance counter interrupt is a regular
> >>> +      * interrupt, not NMI.
> >>> +      */
> >>> +     if (handled == IRQ_HANDLED)
> >>> +             irq_work_run();
> >>> +
> >>> +     return handled;
> >>> +}
> >>> +
> >>> +static int get_pmc_irq(void)
> >>> +{
> >>> +     struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
> >>> +
> >>> +     if (d)
> >>> +             return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
> >>> +
> >>> +     return -EINVAL;
> >>> +}
> >>> +
> >>> +static int loongarch_pmu_event_init(struct perf_event *event)
> >>> +{
> >>> +     int r, irq;
> >>> +     unsigned long flags;
> >>> +
> >>> +     /* does not support taken branch sampling */
> >>> +     if (has_branch_stack(event))
> >>> +             return -EOPNOTSUPP;
> >>> +
> >>> +     switch (event->attr.type) {
> >>> +     case PERF_TYPE_RAW:
> >>> +     case PERF_TYPE_HARDWARE:
> >>> +     case PERF_TYPE_HW_CACHE:
> >>> +             break;
> >>> +
> >>> +     default:
> >>> +             /* Init it to avoid false validate_group */
> >>> +             event->hw.event_base = 0xffffffff;
> >>> +             return -ENOENT;
> >>> +     }
> >>> +
> >>> +     if (event->cpu >= 0 && !cpu_online(event->cpu))
> >>> +             return -ENODEV;
> >>> +
> >>> +     irq = get_pmc_irq();
> >>> +     flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
> >>> +     if (!atomic_inc_not_zero(&active_events)) {
> >>> +             mutex_lock(&pmu_reserve_mutex);
> >>> +             if (atomic_read(&active_events) == 0) {
> >>> +                     r = request_irq(irq, pmu_handle_irq,
> >>> +                                     flags, "Perf_PMU", &loongarch_pmu);
> >>> +                     if (r < 0) {
> >>> +                             pr_warn("PMU IRQ request failed\n");
> >>> +                             return -ENODEV;
> >>> +                     }
> >>> +             }
> >>> +             atomic_inc(&active_events);
> >>> +             mutex_unlock(&pmu_reserve_mutex);
> >>> +     }
> >>> +
> >>> +     return __hw_perf_event_init(event);
> >>> +}
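Note the error path above: if request_irq() fails, the function returns
-ENODEV with pmu_reserve_mutex still held. This is the second coccinelle
complaint ("584:4-10: preceding lock on line 578") in the kernel test robot
report further down this thread. A sketch of the corrected path, assuming no
other cleanup is needed on that branch:

    if (atomic_read(&active_events) == 0) {
            r = request_irq(irq, pmu_handle_irq,
                            flags, "Perf_PMU", &loongarch_pmu);
            if (r < 0) {
                    mutex_unlock(&pmu_reserve_mutex); /* was missing */
                    pr_warn("PMU IRQ request failed\n");
                    return -ENODEV;
            }
    }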
> >>> +
> >>> +static struct pmu pmu = {
> >>> +     .pmu_enable     = loongarch_pmu_enable,
> >>> +     .pmu_disable    = loongarch_pmu_disable,
> >>> +     .event_init     = loongarch_pmu_event_init,
> >>> +     .add            = loongarch_pmu_add,
> >>> +     .del            = loongarch_pmu_del,
> >>> +     .start          = loongarch_pmu_start,
> >>> +     .stop           = loongarch_pmu_stop,
> >>> +     .read           = loongarch_pmu_read,
> >>> +};
> >>> +
> >>> +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
> >>> +{
> >>> +     return (pev->event_id & 0xff);
> >>> +}
> >>> +
> >>> +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
> >>> +{
> >>> +     const struct loongarch_perf_event *pev;
> >>> +
> >>> +     pev = &(*loongarch_pmu.general_event_map)[idx];
> >>> +
> >>> +     if (pev->event_id == HW_OP_UNSUPPORTED)
> >>> +             return ERR_PTR(-ENOENT);
> >>> +
> >>> +     return pev;
> >>> +}
> >>> +
> >>> +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
> >>> +{
> >>> +     unsigned int cache_type, cache_op, cache_result;
> >>> +     const struct loongarch_perf_event *pev;
> >>> +
> >>> +     cache_type = (config >> 0) & 0xff;
> >>> +     if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
> >>> +             return ERR_PTR(-EINVAL);
> >>> +
> >>> +     cache_op = (config >> 8) & 0xff;
> >>> +     if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
> >>> +             return ERR_PTR(-EINVAL);
> >>> +
> >>> +     cache_result = (config >> 16) & 0xff;
> >>> +     if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
> >>> +             return ERR_PTR(-EINVAL);
> >>> +
> >>> +     pev = &((*loongarch_pmu.cache_event_map)
> >>> +                                     [cache_type]
> >>> +                                     [cache_op]
> >>> +                                     [cache_result]);
> >>> +
> >>> +     if (pev->event_id == CACHE_OP_UNSUPPORTED)
> >>> +             return ERR_PTR(-ENOENT);
> >>> +
> >>> +     return pev;
> >>> +}
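The shifts decode the standard perf ABI layout for PERF_TYPE_HW_CACHE events,
where config = id | (op << 8) | (result << 16). For example, a userspace
request for L1D read misses would be built as follows (a sketch using the
generic uapi <linux/perf_event.h> identifiers):

    struct perf_event_attr attr = {
            .type   = PERF_TYPE_HW_CACHE,
            .size   = sizeof(attr),
            .config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
    };

which this function maps to hardware event 0x9 via loongson_new_cache_map
below.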
> >>> +
> >>> +static int validate_group(struct perf_event *event)
> >>> +{
> >>> +     struct perf_event *sibling, *leader = event->group_leader;
> >>> +     struct cpu_hw_events fake_cpuc;
> >>> +
> >>> +     memset(&fake_cpuc, 0, sizeof(fake_cpuc));
> >>> +
> >>> +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
> >>> +             return -EINVAL;
> >>> +
> >>> +     for_each_sibling_event(sibling, leader) {
> >>> +             if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
> >>> +                     return -EINVAL;
> >>> +     }
> >>> +
> >>> +     if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
> >>> +             return -EINVAL;
> >>> +
> >>> +     return 0;
> >>> +}
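validate_group() dry-runs counter allocation against a zeroed fake
cpu_hw_events, so a group that could never be scheduled together is rejected
at creation time rather than silently failing to count later. From userspace
it is the sibling's perf_event_open() that fails; an illustrative sketch
(hypothetical attrs, includes and error handling elided):

    int leader  = syscall(__NR_perf_event_open, &attr1, 0, -1, -1, 0);
    int sibling = syscall(__NR_perf_event_open, &attr2, 0, -1, leader, 0);
    /* With four hardware counters, the fifth event added to one group
     * should fail here with EINVAL, courtesy of validate_group(). */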
> >>> +
> >>> +static void reset_counters(void *arg)
> >>> +{
> >>> +     int counters = (int)(long)arg;
> >>> +
> >>> +     switch (counters) {
> >>> +     case 4:
> >>> +             loongarch_pmu_write_control(3, 0);
> >>> +             loongarch_pmu.write_counter(3, 0);
> >>> +             fallthrough;
> >>> +     case 3:
> >>> +             loongarch_pmu_write_control(2, 0);
> >>> +             loongarch_pmu.write_counter(2, 0);
> >>> +             fallthrough;
> >>> +     case 2:
> >>> +             loongarch_pmu_write_control(1, 0);
> >>> +             loongarch_pmu.write_counter(1, 0);
> >>> +             fallthrough;
> >>> +     case 1:
> >>> +             loongarch_pmu_write_control(0, 0);
> >>> +             loongarch_pmu.write_counter(0, 0);
> >>> +     }
> >>> +}
> >>> +
> >>> +static const struct loongarch_perf_event loongson_new_event_map[PERF_COUNT_HW_MAX] = {
> >>> +     PERF_MAP_ALL_UNSUPPORTED,
> >>> +     [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
> >>> +     [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
> >>> +     [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
> >>> +     [PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
> >>> +     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
> >>> +     [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
> >>> +};
> >>> +
> >>> +static const struct loongarch_perf_event loongson_new_cache_map
> >>> +                             [PERF_COUNT_HW_CACHE_MAX]
> >>> +                             [PERF_COUNT_HW_CACHE_OP_MAX]
> >>> +                             [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
> >>> +PERF_CACHE_MAP_ALL_UNSUPPORTED,
> >>> +[C(L1D)] = {
> >>> +     /*
> >>> +      * Like some other architectures (e.g. ARM), the performance
> >>> +      * counters don't differentiate between read and write
> >>> +      * accesses/misses, so this isn't strictly correct, but it's the
> >>> +      * best we can do. Writes and reads get combined.
> >>> +      */
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0x8 },
> >>> +             [C(RESULT_MISS)]        = { 0x9 },
> >>> +     },
> >>> +     [C(OP_WRITE)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0x8 },
> >>> +             [C(RESULT_MISS)]        = { 0x9 },
> >>> +     },
> >>> +     [C(OP_PREFETCH)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0xaa },
> >>> +             [C(RESULT_MISS)]        = { 0xa9 },
> >>> +     },
> >>> +},
> >>> +[C(L1I)] = {
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0x6 },
> >>> +             [C(RESULT_MISS)]        = { 0x7 },
> >>> +     },
> >>> +},
> >>> +[C(LL)] = {
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0xc },
> >>> +             [C(RESULT_MISS)]        = { 0xd },
> >>> +     },
> >>> +     [C(OP_WRITE)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0xc },
> >>> +             [C(RESULT_MISS)]        = { 0xd },
> >>> +     },
> >>> +},
> >>> +[C(ITLB)] = {
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_MISS)]    = { 0x3b },
> >>> +     },
> >>> +},
> >>> +[C(DTLB)] = {
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0x4 },
> >>> +             [C(RESULT_MISS)]        = { 0x3c },
> >>> +     },
> >>> +     [C(OP_WRITE)] = {
> >>> +             [C(RESULT_ACCESS)]      = { 0x4 },
> >>> +             [C(RESULT_MISS)]        = { 0x3c },
> >>> +     },
> >>> +},
> >>> +[C(BPU)] = {
> >>> +     /* Using the same code for *HW_BRANCH* */
> >>> +     [C(OP_READ)] = {
> >>> +             [C(RESULT_ACCESS)]  = { 0x02 },
> >>> +             [C(RESULT_MISS)]    = { 0x03 },
> >>> +     },
> >>> +},
> >>> +};
> >>> +
> >>> +static int __hw_perf_event_init(struct perf_event *event)
> >>> +{
> >>> +     struct perf_event_attr *attr = &event->attr;
> >>> +     struct hw_perf_event *hwc = &event->hw;
> >>> +     const struct loongarch_perf_event *pev;
> >>> +     int err;
> >>> +
> >>> +     /* Returning LoongArch event descriptor for generic perf event. */
> >>> +     if (PERF_TYPE_HARDWARE == event->attr.type) {
> >>> +             if (event->attr.config >= PERF_COUNT_HW_MAX)
> >>> +                     return -EINVAL;
> >>> +             pev = loongarch_pmu_map_general_event(event->attr.config);
> >>> +     } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
> >>> +             pev = loongarch_pmu_map_cache_event(event->attr.config);
> >>> +     } else if (PERF_TYPE_RAW == event->attr.type) {
> >>> +             /* We are working on the global raw event. */
> >>> +             mutex_lock(&raw_event_mutex);
> >>> +             pev = loongarch_pmu.map_raw_event(event->attr.config);
> >>> +     } else {
> >>> +             /* The event type is not (yet) supported. */
> >>> +             return -EOPNOTSUPP;
> >>> +     }
> >>> +
> >>> +     if (IS_ERR(pev)) {
> >>> +             if (PERF_TYPE_RAW == event->attr.type)
> >>> +                     mutex_unlock(&raw_event_mutex);
> >>> +             return PTR_ERR(pev);
> >>> +     }
> >>> +
> >>> +     /*
> >>> +      * We allow max flexibility on how each individual counter shared
> >>> +      * by the single CPU operates (the mode exclusion and the range).
> >>> +      */
> >>> +     hwc->config_base = CSR_PERFCTRL_IE;
> >>> +
> >>> +     hwc->event_base = loongarch_pmu_perf_event_encode(pev);
> >>> +     if (PERF_TYPE_RAW == event->attr.type)
> >>> +             mutex_unlock(&raw_event_mutex);
> >>> +
> >>> +     if (!attr->exclude_user) {
> >>> +             hwc->config_base |= CSR_PERFCTRL_PLV3;
> >>> +             hwc->config_base |= CSR_PERFCTRL_PLV2;
> >>> +     }
> >>> +     if (!attr->exclude_kernel) {
> >>> +             hwc->config_base |= CSR_PERFCTRL_PLV0;
> >>> +     }
> >>> +     if (!attr->exclude_hv) {
> >>> +             hwc->config_base |= CSR_PERFCTRL_PLV1;
> >>> +     }
> >>> +
> >>> +     hwc->config_base &= M_PERFCTL_CONFIG_MASK;
> >>> +     /*
> >>> +      * The event can belong to another cpu. We do not assign a local
> >>> +      * counter for it for now.
> >>> +      */
> >>> +     hwc->idx = -1;
> >>> +     hwc->config = 0;
> >>> +
> >>> +     if (!hwc->sample_period) {
> >>> +             hwc->sample_period  = loongarch_pmu.max_period;
> >>> +             hwc->last_period    = hwc->sample_period;
> >>> +             local64_set(&hwc->period_left, hwc->sample_period);
> >>> +     }
> >>> +
> >>> +     err = 0;
> >>> +     if (event->group_leader != event)
> >>> +             err = validate_group(event);
> >>> +
> >>> +     event->destroy = hw_perf_event_destroy;
> >>> +
> >>> +     if (err)
> >>> +             event->destroy(event);
> >>> +
> >>> +     return err;
> >>> +}
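The PLV bits map the generic exclude_* attributes onto LoongArch privilege
levels: the patch counts user mode at PLV3 (and PLV2), the kernel at PLV0,
and treats PLV1 as the hypervisor level. A user-only cycle counter would
therefore be requested as (sketch):

    struct perf_event_attr attr = {
            .type           = PERF_TYPE_HARDWARE,
            .size           = sizeof(attr),
            .config         = PERF_COUNT_HW_CPU_CYCLES,
            .exclude_kernel = 1,    /* CSR_PERFCTRL_PLV0 stays clear */
            .exclude_hv     = 1,    /* CSR_PERFCTRL_PLV1 stays clear */
    };
    /* config_base becomes CSR_PERFCTRL_IE | PLV3 | PLV2: user mode only. */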
> >>> +
> >>> +static void pause_local_counters(void)
> >>> +{
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     int ctr = loongarch_pmu.num_counters;
> >>> +     unsigned long flags;
> >>> +
> >>> +     local_irq_save(flags);
> >>> +     do {
> >>> +             ctr--;
> >>> +             cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
> >>> +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
> >>> +                                      ~M_PERFCTL_COUNT_EVENT_WHENEVER);
> >>> +     } while (ctr > 0);
> >>> +     local_irq_restore(flags);
> >>> +}
> >>> +
> >>> +static void resume_local_counters(void)
> >>> +{
> >>> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> >>> +     int ctr = loongarch_pmu.num_counters;
> >>> +
> >>> +     do {
> >>> +             ctr--;
> >>> +             loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
> >>> +     } while (ctr > 0);
> >>> +}
> >>> +
> >>> +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
> >>> +{
> >>> +     raw_event.event_id = config & 0xff;
> >>> +
> >>> +     return &raw_event;
> >>> +}
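Only the low eight bits of a raw config survive, so hardware event number
0x12, say, can be requested either programmatically or from the perf tool as
'-e r12'. A sketch of the programmatic form:

    struct perf_event_attr attr = {
            .type   = PERF_TYPE_RAW,
            .size   = sizeof(attr),
            .config = 0x12, /* masked to 0xff, becomes hwc->event_base */
    };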
> >>> +
> >>> +static int __init
> >>> +init_hw_perf_events(void)
> >>> +{
> >>> +     int counters = 4;
> >>> +
> >>> +     if (!cpu_has_pmp)
> >>> +             return -ENODEV;
> >>> +
> >>> +     pr_info("Performance counters: ");
> >>> +
> >>> +     loongarch_pmu.num_counters = counters;
> >>> +     loongarch_pmu.max_period = (1ULL << 63) - 1;
> >>> +     loongarch_pmu.valid_count = (1ULL << 63) - 1;
> >>> +     loongarch_pmu.overflow = 1ULL << 63;
> >>> +     loongarch_pmu.name = "loongarch/loongson64";
> >>> +     loongarch_pmu.read_counter = loongarch_pmu_read_counter;
> >>> +     loongarch_pmu.write_counter = loongarch_pmu_write_counter;
> >>> +     loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
> >>> +     loongarch_pmu.general_event_map = &loongson_new_event_map;
> >>> +     loongarch_pmu.cache_event_map = &loongson_new_cache_map;
> >>> +
> >>> +     on_each_cpu(reset_counters, (void *)(long)counters, 1);
> >>> +
> >>> +     pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
> >>> +             loongarch_pmu.name, counters, 64);
> >>> +
> >>> +     perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
> >>> +
> >>> +     return 0;
> >>> +}
> >>> +early_initcall(init_hw_perf_events);
> >>> diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
> >>> new file mode 100644
> >>> index 000000000000..a5e9768e8414
> >>> --- /dev/null
> >>> +++ b/arch/loongarch/kernel/perf_regs.c
> >>> @@ -0,0 +1,50 @@
> >>> +// SPDX-License-Identifier: GPL-2.0
> >>> +/*
> >>> + * Copyright (C) 2022 Loongson Technology Corporation Limited
> >> And this file.
> > OK, thanks.
> >
> > Huacai
> >>> + */
> >>> +
> >>> +#include <linux/perf_event.h>
> >>> +
> >>> +#include <asm/ptrace.h>
> >>> +
> >>> +#ifdef CONFIG_32BIT
> >>> +u64 perf_reg_abi(struct task_struct *tsk)
> >>> +{
> >>> +     return PERF_SAMPLE_REGS_ABI_32;
> >>> +}
> >>> +#else /* Must be CONFIG_64BIT */
> >>> +u64 perf_reg_abi(struct task_struct *tsk)
> >>> +{
> >>> +     if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
> >>> +             return PERF_SAMPLE_REGS_ABI_32;
> >>> +     else
> >>> +             return PERF_SAMPLE_REGS_ABI_64;
> >>> +}
> >>> +#endif /* CONFIG_32BIT */
> >>> +
> >>> +int perf_reg_validate(u64 mask)
> >>> +{
> >>> +     if (!mask)
> >>> +             return -EINVAL;
> >>> +     if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
> >>> +             return -EINVAL;
> >>> +     return 0;
> >>> +}
> >>> +
> >>> +u64 perf_reg_value(struct pt_regs *regs, int idx)
> >>> +{
> >>> +     if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
> >>> +             return 0;
> >>> +
> >>> +     if ((u32)idx == PERF_REG_LOONGARCH_PC)
> >>> +             return regs->csr_era;
> >>> +
> >>> +     return regs->regs[idx];
> >>> +}
> >>> +
> >>> +void perf_get_regs_user(struct perf_regs *regs_user,
> >>> +                     struct pt_regs *regs)
> >>> +{
> >>> +     regs_user->regs = task_pt_regs(current);
> >>> +     regs_user->abi = perf_reg_abi(current);
> >>> +}
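Together with perf_reg_validate() above, any non-empty subset of the 32
defined registers may be sampled. A userspace sketch requesting the PC and
the stack pointer (r3 in the LoongArch ABI), using the enum from the new
uapi header:

    attr.sample_type      |= PERF_SAMPLE_REGS_USER;
    attr.sample_regs_user  = (1ULL << PERF_REG_LOONGARCH_PC) |
                             (1ULL << PERF_REG_LOONGARCH_R3);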
> >> --
> >> WANG "xen0n" Xuerui
> >>
> >> Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
> >>
>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] LoongArch: Add perf events support
@ 2022-08-15 21:22 kernel test robot
  0 siblings, 0 replies; 7+ messages in thread
From: kernel test robot @ 2022-08-15 21:22 UTC (permalink / raw)
  To: kbuild

[-- Attachment #1: Type: text/plain, Size: 7775 bytes --]

BCC: lkp@intel.com
CC: kbuild-all@lists.01.org
In-Reply-To: <20220815124702.3330803-1-chenhuacai@loongson.cn>
References: <20220815124702.3330803-1-chenhuacai@loongson.cn>
TO: Huacai Chen <chenhuacai@loongson.cn>
TO: Arnd Bergmann <arnd@arndb.de>
TO: Huacai Chen <chenhuacai@kernel.org>
TO: Peter Zijlstra <peterz@infradead.org>
TO: Ingo Molnar <mingo@redhat.com>
TO: Arnaldo Carvalho de Melo <acme@kernel.org>
TO: Mark Rutland <mark.rutland@arm.com>
TO: Alexander Shishkin <alexander.shishkin@linux.intel.com>
TO: Jiri Olsa <jolsa@kernel.org>
TO: Namhyung Kim <namhyung@kernel.org>
CC: loongarch@lists.linux.dev
CC: linux-arch@vger.kernel.org
CC: Xuefeng Li <lixuefeng@loongson.cn>
CC: Guo Ren <guoren@kernel.org>
CC: Xuerui Wang <kernel@xen0n.name>
CC: Jiaxun Yang <jiaxun.yang@flygoat.com>
CC: linux-perf-users@vger.kernel.org
CC: linux-kernel@vger.kernel.org

Hi Huacai,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.0-rc1 next-20220815]
[cannot apply to soc/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Huacai-Chen/LoongArch-Add-perf-events-support/20220815-204852
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 568035b01cfb107af8d2e4bd2fb9aea22cf5b868
:::::: branch date: 9 hours ago
:::::: commit date: 9 hours ago
config: loongarch-randconfig-c041-20220815 (https://download.01.org/0day-ci/archive/20220816/202208160536.N2sRhiCY-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Julia Lawall <julia.lawall@lip6.fr>

cocci warnings: (new ones prefixed by >>)
>> arch/loongarch/kernel/perf_event.c:795:2-8: preceding lock on line 785
   arch/loongarch/kernel/perf_event.c:584:4-10: preceding lock on line 578

vim +795 arch/loongarch/kernel/perf_event.c

0e6d9490ff3f61 Huacai Chen 2022-08-15  768  
0e6d9490ff3f61 Huacai Chen 2022-08-15  769  static int __hw_perf_event_init(struct perf_event *event)
0e6d9490ff3f61 Huacai Chen 2022-08-15  770  {
0e6d9490ff3f61 Huacai Chen 2022-08-15  771  	struct perf_event_attr *attr = &event->attr;
0e6d9490ff3f61 Huacai Chen 2022-08-15  772  	struct hw_perf_event *hwc = &event->hw;
0e6d9490ff3f61 Huacai Chen 2022-08-15  773  	const struct loongarch_perf_event *pev;
0e6d9490ff3f61 Huacai Chen 2022-08-15  774  	int err;
0e6d9490ff3f61 Huacai Chen 2022-08-15  775  
0e6d9490ff3f61 Huacai Chen 2022-08-15  776  	/* Returning LoongArch event descriptor for generic perf event. */
0e6d9490ff3f61 Huacai Chen 2022-08-15  777  	if (PERF_TYPE_HARDWARE == event->attr.type) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  778  		if (event->attr.config >= PERF_COUNT_HW_MAX)
0e6d9490ff3f61 Huacai Chen 2022-08-15  779  			return -EINVAL;
0e6d9490ff3f61 Huacai Chen 2022-08-15  780  		pev = loongarch_pmu_map_general_event(event->attr.config);
0e6d9490ff3f61 Huacai Chen 2022-08-15  781  	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  782  		pev = loongarch_pmu_map_cache_event(event->attr.config);
0e6d9490ff3f61 Huacai Chen 2022-08-15  783  	} else if (PERF_TYPE_RAW == event->attr.type) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  784  		/* We are working on the global raw event. */
0e6d9490ff3f61 Huacai Chen 2022-08-15 @785  		mutex_lock(&raw_event_mutex);
0e6d9490ff3f61 Huacai Chen 2022-08-15  786  		pev = loongarch_pmu.map_raw_event(event->attr.config);
0e6d9490ff3f61 Huacai Chen 2022-08-15  787  	} else {
0e6d9490ff3f61 Huacai Chen 2022-08-15  788  		/* The event type is not (yet) supported. */
0e6d9490ff3f61 Huacai Chen 2022-08-15  789  		return -EOPNOTSUPP;
0e6d9490ff3f61 Huacai Chen 2022-08-15  790  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  791  
0e6d9490ff3f61 Huacai Chen 2022-08-15  792  	if (IS_ERR(pev)) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  793  		if (PERF_TYPE_RAW == event->attr.type)
0e6d9490ff3f61 Huacai Chen 2022-08-15  794  			mutex_unlock(&raw_event_mutex);
0e6d9490ff3f61 Huacai Chen 2022-08-15 @795  		return PTR_ERR(pev);
0e6d9490ff3f61 Huacai Chen 2022-08-15  796  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  797  
0e6d9490ff3f61 Huacai Chen 2022-08-15  798  	/*
0e6d9490ff3f61 Huacai Chen 2022-08-15  799  	 * We allow max flexibility on how each individual counter shared
0e6d9490ff3f61 Huacai Chen 2022-08-15  800  	 * by the single CPU operates (the mode exclusion and the range).
0e6d9490ff3f61 Huacai Chen 2022-08-15  801  	 */
0e6d9490ff3f61 Huacai Chen 2022-08-15  802  	hwc->config_base = CSR_PERFCTRL_IE;
0e6d9490ff3f61 Huacai Chen 2022-08-15  803  
0e6d9490ff3f61 Huacai Chen 2022-08-15  804  	hwc->event_base = loongarch_pmu_perf_event_encode(pev);
0e6d9490ff3f61 Huacai Chen 2022-08-15  805  	if (PERF_TYPE_RAW == event->attr.type)
0e6d9490ff3f61 Huacai Chen 2022-08-15  806  		mutex_unlock(&raw_event_mutex);
0e6d9490ff3f61 Huacai Chen 2022-08-15  807  
0e6d9490ff3f61 Huacai Chen 2022-08-15  808  	if (!attr->exclude_user) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  809  		hwc->config_base |= CSR_PERFCTRL_PLV3;
0e6d9490ff3f61 Huacai Chen 2022-08-15  810  		hwc->config_base |= CSR_PERFCTRL_PLV2;
0e6d9490ff3f61 Huacai Chen 2022-08-15  811  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  812  	if (!attr->exclude_kernel) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  813  		hwc->config_base |= CSR_PERFCTRL_PLV0;
0e6d9490ff3f61 Huacai Chen 2022-08-15  814  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  815  	if (!attr->exclude_hv) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  816  		hwc->config_base |= CSR_PERFCTRL_PLV1;
0e6d9490ff3f61 Huacai Chen 2022-08-15  817  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  818  
0e6d9490ff3f61 Huacai Chen 2022-08-15  819  	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
0e6d9490ff3f61 Huacai Chen 2022-08-15  820  	/*
0e6d9490ff3f61 Huacai Chen 2022-08-15  821  	 * The event can belong to another cpu. We do not assign a local
0e6d9490ff3f61 Huacai Chen 2022-08-15  822  	 * counter for it for now.
0e6d9490ff3f61 Huacai Chen 2022-08-15  823  	 */
0e6d9490ff3f61 Huacai Chen 2022-08-15  824  	hwc->idx = -1;
0e6d9490ff3f61 Huacai Chen 2022-08-15  825  	hwc->config = 0;
0e6d9490ff3f61 Huacai Chen 2022-08-15  826  
0e6d9490ff3f61 Huacai Chen 2022-08-15  827  	if (!hwc->sample_period) {
0e6d9490ff3f61 Huacai Chen 2022-08-15  828  		hwc->sample_period  = loongarch_pmu.max_period;
0e6d9490ff3f61 Huacai Chen 2022-08-15  829  		hwc->last_period    = hwc->sample_period;
0e6d9490ff3f61 Huacai Chen 2022-08-15  830  		local64_set(&hwc->period_left, hwc->sample_period);
0e6d9490ff3f61 Huacai Chen 2022-08-15  831  	}
0e6d9490ff3f61 Huacai Chen 2022-08-15  832  
0e6d9490ff3f61 Huacai Chen 2022-08-15  833  	err = 0;
0e6d9490ff3f61 Huacai Chen 2022-08-15  834  	if (event->group_leader != event)
0e6d9490ff3f61 Huacai Chen 2022-08-15  835  		err = validate_group(event);
0e6d9490ff3f61 Huacai Chen 2022-08-15  836  
0e6d9490ff3f61 Huacai Chen 2022-08-15  837  	event->destroy = hw_perf_event_destroy;
0e6d9490ff3f61 Huacai Chen 2022-08-15  838  
0e6d9490ff3f61 Huacai Chen 2022-08-15  839  	if (err)
0e6d9490ff3f61 Huacai Chen 2022-08-15  840  		event->destroy(event);
0e6d9490ff3f61 Huacai Chen 2022-08-15  841  
0e6d9490ff3f61 Huacai Chen 2022-08-15  842  	return err;
0e6d9490ff3f61 Huacai Chen 2022-08-15  843  }
0e6d9490ff3f61 Huacai Chen 2022-08-15  844  
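The flagged return at line 795 unlocks raw_event_mutex only behind a type
check, a pairing coccinelle cannot follow and one that is easy to break in
later edits. One possible restructuring, sketched here and untested, is a
hypothetical helper that keeps lock and unlock in a single scope while still
protecting the shared raw_event object until its id has been encoded:

    /* Hypothetical helper: resolve a raw config with the mutex held for
     * exactly the region that touches the shared raw_event object. */
    static int loongarch_pmu_raw_event_base(u64 config, unsigned int *base)
    {
            const struct loongarch_perf_event *pev;

            mutex_lock(&raw_event_mutex);
            pev = loongarch_pmu.map_raw_event(config);
            if (IS_ERR(pev)) {
                    mutex_unlock(&raw_event_mutex);
                    return PTR_ERR(pev);
            }
            *base = loongarch_pmu_perf_event_encode(pev);
            mutex_unlock(&raw_event_mutex);
            return 0;
    }

__hw_perf_event_init() could then call this for the PERF_TYPE_RAW case and
drop the two conditional unlock sites.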

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2022-08-16 10:24 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-15 12:47 [PATCH] LoongArch: Add perf events support Huacai Chen
2022-08-16  5:46 ` WANG Xuerui
2022-08-16  8:18   ` Huacai Chen
2022-08-16 10:07     ` Qi Hu
2022-08-16 10:24       ` Huacai Chen
2022-08-16  8:59 ` kernel test robot
2022-08-15 21:22 kernel test robot
