All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-20 23:10 ` Tuan Phan
  0 siblings, 0 replies; 12+ messages in thread
From: Tuan Phan @ 2020-10-20 23:10 UTC (permalink / raw)
  Cc: patches, mark.rutland, robin.murphy, Will Deacon, linux-kernel,
	linux-arm-kernel

DMC-620 PMU supports a total of 10 counters, each of which is
independently programmable to different events and can
be started and stopped individually.

Currently, it supports ACPI only. Other platforms are welcome to test
and add support for device tree.

Usage example:
  #perf stat -e arm_dmc620_10008c000/clk_cycle_count/ -C 0
  Get perf event for clk_cycle_count counter.

  #perf stat -e arm_dmc620_10008c000/clkdiv2_allocate,mask=0x1f,match=0x2f,
  incr=2,invert=1/ -C 0
  The above example shows how to specify mask, match, incr,
  invert parameters for clkdiv2_allocate event.

Signed-off-by: Tuan Phan <tuanphan@os.amperecomputing.com>
---
Changes in v4:
- Addressed Robin's comments.

Changes in v3:
- Removed "_OFFSET" suffix.
- Renamed "affinity" to "irq".
- Have a better definition of group register.

Changes in v2:
- Removed IRQF_SHARED flag and added support for multiple
PMUs sharing the same interrupt.
- Fixed an interrupt handler race condition.

The ACPI binding spec for PMU DMC620 can be downloaded at:
https://developer.arm.com/documentation/den0093/c/

 drivers/perf/Kconfig          |   7 +
 drivers/perf/Makefile         |   1 +
 drivers/perf/arm_dmc620_pmu.c | 746 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 754 insertions(+)
 create mode 100644 drivers/perf/arm_dmc620_pmu.c

diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 130327ff..3075cf1 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -130,6 +130,13 @@ config ARM_SPE_PMU
 	  Extension, which provides periodic sampling of operations in
 	  the CPU pipeline and reports this via the perf AUX interface.
 
+config ARM_DMC620_PMU
+	tristate "Enable PMU support for the ARM DMC-620 memory controller"
+	depends on (ARM64 && ACPI) || COMPILE_TEST
+	help
+	  Support for PMU events monitoring on the ARM DMC-620 memory
+	  controller.
+
 source "drivers/perf/hisilicon/Kconfig"
 
 endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5365fd5..5260b11 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
 obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
new file mode 100644
index 0000000..d72cd54
--- /dev/null
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM DMC-620 memory controller PMU driver
+ *
+ * Copyright (C) 2020 Ampere Computing LLC.
+ */
+
+#define DMC620_PMUNAME		"arm_dmc620"
+#define DMC620_DRVNAME		DMC620_PMUNAME "_pmu"
+#define pr_fmt(fmt)		DMC620_DRVNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/refcount.h>
+
+#define DMC620_PA_SHIFT					12
+#define DMC620_CNT_INIT					0x80000000
+#define DMC620_CNT_MAX_PERIOD				0xffffffff
+#define DMC620_PMU_CLKDIV2_MAX_COUNTERS			8
+#define DMC620_PMU_CLK_MAX_COUNTERS			2
+#define DMC620_PMU_MAX_COUNTERS				\
+	(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
+
+/*
+ * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
+ * offsets are relative to that base.
+ *
+ * Each counter has a group of control/value registers, and the
+ * DMC620_PMU_COUNTERn offsets are within a counter group.
+ *
+ * The counter registers groups start at 0xA10.
+ */
+#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2		0x8
+#define  DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
+		(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
+#define DMC620_PMU_OVERFLOW_STATUS_CLK			0xC
+#define  DMC620_PMU_OVERFLOW_STATUS_CLK_MASK		\
+		(DMC620_PMU_CLK_MAX_COUNTERS - 1)
+#define DMC620_PMU_COUNTERS_BASE			0x10
+#define DMC620_PMU_COUNTERn_MASK_31_00			0x0
+#define DMC620_PMU_COUNTERn_MASK_63_32			0x4
+#define DMC620_PMU_COUNTERn_MATCH_31_00			0x8
+#define DMC620_PMU_COUNTERn_MATCH_63_32			0xC
+#define DMC620_PMU_COUNTERn_CONTROL			0x10
+#define  DMC620_PMU_COUNTERn_CONTROL_ENABLE		BIT(0)
+#define  DMC620_PMU_COUNTERn_CONTROL_INVERT		BIT(1)
+#define  DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX		GENMASK(6, 2)
+#define  DMC620_PMU_COUNTERn_CONTROL_INCR_MUX		GENMASK(8, 7)
+#define DMC620_PMU_COUNTERn_VALUE			0x20
+/* Offset of the registers for a given counter, relative to 0xA00 */
+#define DMC620_PMU_COUNTERn_OFFSET(n) \
+	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
+
+static LIST_HEAD(dmc620_pmu_irqs);
+static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+
+struct dmc620_pmu_irq {
+	struct hlist_node node;
+	struct list_head pmus_node;
+	struct list_head irqs_node;
+	refcount_t refcount;
+	unsigned int irq_num;
+	unsigned int cpu;
+};
+
+struct dmc620_pmu {
+	struct pmu pmu;
+	struct platform_device *pdev;
+
+	void __iomem *base;
+	struct dmc620_pmu_irq *irq;
+	struct list_head pmus_node;
+
+	/*
+	 * We put all clkdiv2 and clk counters to a same array.
+	 * The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
+	 * clkdiv2 counters, the last DMC620_PMU_CLK_MAX_COUNTERS
+	 * belong to clk counters.
+	 */
+	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
+	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
+};
+
+#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
+
+static int cpuhp_state_num;
+
+struct dmc620_pmu_event_attr {
+	struct device_attribute attr;
+	u8 clkdiv2;
+	u8 eventid;
+};
+
+static ssize_t
+dmc620_pmu_event_show(struct device *dev,
+			   struct device_attribute *attr, char *page)
+{
+	struct dmc620_pmu_event_attr *eattr;
+
+	eattr = container_of(attr, typeof(*eattr), attr);
+
+	return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
+}
+
+#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)		\
+	(&((struct dmc620_pmu_event_attr[]) {{				\
+		.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL),	\
+		.clkdiv2 = _clkdiv2,						\
+		.eventid = _eventid,					\
+	}})[0].attr.attr)
+
+static struct attribute *dmc620_pmu_events_attrs[] = {
+	/* clkdiv2 events list */
+	DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
+
+	/* clk events list */
+	DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
+	DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
+	DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
+	NULL,
+};
+
+static struct attribute_group dmc620_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = dmc620_pmu_events_attrs,
+};
+
+/* User ABI */
+#define ATTR_CFG_FLD_mask_CFG		config
+#define ATTR_CFG_FLD_mask_LO		0
+#define ATTR_CFG_FLD_mask_HI		44
+#define ATTR_CFG_FLD_match_CFG		config1
+#define ATTR_CFG_FLD_match_LO		0
+#define ATTR_CFG_FLD_match_HI		44
+#define ATTR_CFG_FLD_invert_CFG		config2
+#define ATTR_CFG_FLD_invert_LO		0
+#define ATTR_CFG_FLD_invert_HI		0
+#define ATTR_CFG_FLD_incr_CFG		config2
+#define ATTR_CFG_FLD_incr_LO		1
+#define ATTR_CFG_FLD_incr_HI		2
+#define ATTR_CFG_FLD_event_CFG		config2
+#define ATTR_CFG_FLD_event_LO		3
+#define ATTR_CFG_FLD_event_HI		8
+#define ATTR_CFG_FLD_clkdiv2_CFG	config2
+#define ATTR_CFG_FLD_clkdiv2_LO		9
+#define ATTR_CFG_FLD_clkdiv2_HI		9
+
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
+	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
+	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name)				\
+	PMU_FORMAT_ATTR(name,					\
+	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,		\
+			     ATTR_CFG_FLD_##name##_LO,		\
+			     ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
+	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name)				\
+	_ATTR_CFG_GET_FLD(attr,					\
+			  ATTR_CFG_FLD_##name##_CFG,		\
+			  ATTR_CFG_FLD_##name##_LO,		\
+			  ATTR_CFG_FLD_##name##_HI)
+
+GEN_PMU_FORMAT_ATTR(mask);
+GEN_PMU_FORMAT_ATTR(match);
+GEN_PMU_FORMAT_ATTR(invert);
+GEN_PMU_FORMAT_ATTR(incr);
+GEN_PMU_FORMAT_ATTR(event);
+GEN_PMU_FORMAT_ATTR(clkdiv2);
+
+static struct attribute *dmc620_pmu_formats_attrs[] = {
+	&format_attr_mask.attr,
+	&format_attr_match.attr,
+	&format_attr_invert.attr,
+	&format_attr_incr.attr,
+	&format_attr_event.attr,
+	&format_attr_clkdiv2.attr,
+	NULL,
+};
+
+static struct attribute_group dmc620_pmu_format_attr_group = {
+	.name	= "format",
+	.attrs	= dmc620_pmu_formats_attrs,
+};
+
+static const struct attribute_group *dmc620_pmu_attr_groups[] = {
+	&dmc620_pmu_events_attr_group,
+	&dmc620_pmu_format_attr_group,
+	NULL,
+};
+
+static inline
+u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
+			unsigned int idx, unsigned int reg)
+{
+	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
+}
+
+static inline
+void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
+			unsigned int idx, unsigned int reg, u32 val)
+{
+	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
+}
+
+static
+unsigned int dmc620_event_to_counter_control(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	unsigned int reg = 0;
+
+	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INVERT,
+			ATTR_CFG_GET_FLD(attr, invert));
+	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
+			ATTR_CFG_GET_FLD(attr, incr));
+	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
+			ATTR_CFG_GET_FLD(attr, event));
+
+	return reg;
+}
+
+static int dmc620_get_event_idx(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	int idx, start_idx, end_idx;
+
+	if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
+		start_idx = 0;
+		end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
+	} else {
+		start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
+		end_idx = DMC620_PMU_MAX_COUNTERS;
+	}
+
+	for (idx = start_idx; idx < end_idx; ++idx) {
+		if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
+			return idx;
+	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+static u64 dmc620_pmu_read_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	return dmc620_pmu_creg_read(dmc620_pmu,
+				    event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
+}
+
+static void dmc620_pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_count, new_count;
+
+	do {
+		/* We may also be called from the irq handler */
+		prev_count = local64_read(&hwc->prev_count);
+		new_count = dmc620_pmu_read_counter(event);
+	} while (local64_cmpxchg(&hwc->prev_count,
+			prev_count, new_count) != prev_count);
+	delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
+	local64_add(delta, &event->count);
+}
+
+static void dmc620_pmu_event_set_period(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
+}
+
+static void dmc620_pmu_enable_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct perf_event_attr *attr = &event->attr;
+	u64 reg;
+
+	reg = ATTR_CFG_GET_FLD(attr, mask);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
+
+	reg = ATTR_CFG_GET_FLD(attr, match);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));
+
+	reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, (u32)reg);
+}
+
+static void dmc620_pmu_disable_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
+}
+
+static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
+{
+	struct dmc620_pmu_irq *irq = data;
+	struct dmc620_pmu *dmc620_pmu;
+	irqreturn_t ret = IRQ_NONE;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
+		unsigned long status;
+		struct perf_event *event;
+		unsigned int idx;
+
+		/*
+		 * HW doesn't provide a control to atomically disable all counters.
+		 * To prevent race condition, disable all events before continuing
+		 */
+		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
+			event = dmc620_pmu->events[idx];
+			if (!event)
+				continue;
+			dmc620_pmu_disable_counter(event);
+		}
+
+		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
+		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
+				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
+		if (!status)
+			continue;
+
+		for_each_set_bit(idx, &status,
+				  DMC620_PMU_MAX_COUNTERS) {
+			event = dmc620_pmu->events[idx];
+			if (WARN_ON_ONCE(!event))
+				continue;
+			dmc620_pmu_event_update(event);
+			dmc620_pmu_event_set_period(event);
+		}
+
+		if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
+			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
+
+		if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
+			DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
+			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
+
+		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
+			event = dmc620_pmu->events[idx];
+			if (!event)
+				continue;
+			if (!(event->hw.state & PERF_HES_STOPPED))
+				dmc620_pmu_enable_counter(event);
+		}
+
+		ret = IRQ_HANDLED;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
+{
+	struct dmc620_pmu_irq *irq;
+	int ret;
+
+	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
+		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
+			return irq;
+
+	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+	if (!irq)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&irq->pmus_node);
+
+	/* Pick one CPU to be the preferred one to use */
+	irq->cpu = raw_smp_processor_id();
+	refcount_set(&irq->refcount, 1);
+
+	ret = request_irq(irq_num, dmc620_pmu_handle_irq,
+			  IRQF_NOBALANCING | IRQF_NO_THREAD,
+			  "dmc620-pmu", irq);
+	if (ret)
+		goto out_free_aff;
+
+	ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+	if (ret)
+		goto out_free_irq;
+
+	ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
+	if (ret)
+		goto out_free_irq;
+
+	irq->irq_num = irq_num;
+	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
+
+	return irq;
+
+out_free_irq:
+	free_irq(irq_num, irq);
+out_free_aff:
+	kfree(irq);
+	return ERR_PTR(ret);
+}
+
+static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
+{
+	struct dmc620_pmu_irq *irq;
+
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	irq = __dmc620_pmu_get_irq(irq_num);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	if (IS_ERR(irq))
+		return PTR_ERR(irq);
+
+	dmc620_pmu->irq = irq;
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	return 0;
+}
+
+static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
+{
+	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
+
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_del_rcu(&dmc620_pmu->pmus_node);
+
+	if (!refcount_dec_and_test(&irq->refcount)) {
+		mutex_unlock(&dmc620_pmu_irqs_lock);
+		return;
+	}
+
+	list_del(&irq->irqs_node);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	free_irq(irq->irq_num, irq);
+	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
+	kfree(irq);
+}
+
+static int dmc620_pmu_event_init(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	struct perf_event *sibling;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/*
+	 * DMC 620 PMUs are shared across all cpus and cannot
+	 * support task bound and sampling events.
+	 */
+	if (is_sampling_event(event) ||
+		event->attach_state & PERF_ATTACH_TASK) {
+		dev_dbg(dmc620_pmu->pmu.dev,
+			"Can't support per-task counters\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Many perf core operations (eg. events rotation) operate on a
+	 * single CPU context. This is obvious for CPU PMUs, where one
+	 * expects the same sets of events being observed on all CPUs,
+	 * but can lead to issues for off-core PMUs, where each
+	 * event could be theoretically assigned to a different CPU. To
+	 * mitigate this, we enforce CPU assignment to one, selected
+	 * processor.
+	 */
+	event->cpu = dmc620_pmu->irq->cpu;
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	/*
+	 * We must NOT create groups containing mixed PMUs, although software
+	 * events are acceptable.
+	 */
+	if (event->group_leader->pmu != event->pmu &&
+			!is_software_event(event->group_leader))
+		return -EINVAL;
+
+	for_each_sibling_event(sibling, event->group_leader) {
+		if (sibling->pmu != event->pmu &&
+				!is_software_event(sibling))
+			return -EINVAL;
+	}
+
+	hwc->idx = -1;
+	return 0;
+}
+
+static void dmc620_pmu_read(struct perf_event *event)
+{
+	dmc620_pmu_event_update(event);
+}
+
+static void dmc620_pmu_start(struct perf_event *event, int flags)
+{
+	event->hw.state = 0;
+	dmc620_pmu_event_set_period(event);
+	dmc620_pmu_enable_counter(event);
+}
+
+static void dmc620_pmu_stop(struct perf_event *event, int flags)
+{
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
+	dmc620_pmu_disable_counter(event);
+	dmc620_pmu_event_update(event);
+	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dmc620_pmu_add(struct perf_event *event, int flags)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+
+	idx = dmc620_get_event_idx(event);
+	if (idx < 0)
+		return idx;
+
+	hwc->idx = idx;
+	dmc620_pmu->events[idx] = event;
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+	if (flags & PERF_EF_START)
+		dmc620_pmu_start(event, PERF_EF_RELOAD);
+
+	perf_event_update_userpage(event);
+	return 0;
+}
+
+static void dmc620_pmu_del(struct perf_event *event, int flags)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	dmc620_pmu_stop(event, PERF_EF_UPDATE);
+	dmc620_pmu->events[idx] = NULL;
+	clear_bit(idx, dmc620_pmu->used_mask);
+	perf_event_update_userpage(event);
+}
+
+static int dmc620_pmu_cpu_teardown(unsigned int cpu,
+				   struct hlist_node *node)
+{
+	struct dmc620_pmu_irq *irq;
+	struct dmc620_pmu *dmc620_pmu;
+	unsigned int target;
+
+	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
+	if (cpu != irq->cpu)
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	/* We're only reading, but this isn't the place to be involving RCU */
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
+		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+	irq->cpu = target;
+
+	return 0;
+}
+
+static int dmc620_pmu_device_probe(struct platform_device *pdev)
+{
+	struct dmc620_pmu *dmc620_pmu;
+	struct resource *res;
+	char *name;
+	int irq_num;
+	int i, ret;
+
+	dmc620_pmu = devm_kzalloc(&pdev->dev,
+			sizeof(struct dmc620_pmu), GFP_KERNEL);
+	if (!dmc620_pmu)
+		return -ENOMEM;
+
+	dmc620_pmu->pdev = pdev;
+	platform_set_drvdata(pdev, dmc620_pmu);
+
+	dmc620_pmu->pmu = (struct pmu) {
+		.module = THIS_MODULE,
+		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= dmc620_pmu_event_init,
+		.add		= dmc620_pmu_add,
+		.del		= dmc620_pmu_del,
+		.start		= dmc620_pmu_start,
+		.stop		= dmc620_pmu_stop,
+		.read		= dmc620_pmu_read,
+		.attr_groups	= dmc620_pmu_attr_groups,
+	};
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmc620_pmu->base))
+		return PTR_ERR(dmc620_pmu->base);
+
+	/* Make sure device is reset before enabling interrupt */
+	for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
+		dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
+
+	irq_num = platform_get_irq(pdev, 0);
+	if (irq_num < 0)
+		return irq_num;
+
+	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
+	if (ret)
+		return ret;
+
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+				  "%s_%llx", DMC620_PMUNAME,
+				  (res->start) >> DMC620_PA_SHIFT);
+	if (!name) {
+		dev_err(&pdev->dev,
+			  "Create name failed, PMU @%pa\n", &res->start);
+		goto out_teardown_dev;
+	}
+
+	ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
+	if (ret)
+		goto out_teardown_dev;
+
+	return 0;
+
+out_teardown_dev:
+	dmc620_pmu_put_irq(dmc620_pmu);
+	synchronize_rcu();
+	return ret;
+}
+
+static int dmc620_pmu_device_remove(struct platform_device *pdev)
+{
+	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
+
+	dmc620_pmu_put_irq(dmc620_pmu);
+
+	/* perf will synchronise RCU before devres can free dmc620_pmu */
+	perf_pmu_unregister(&dmc620_pmu->pmu);
+
+	return 0;
+}
+
+static const struct acpi_device_id dmc620_acpi_match[] = {
+	{ "ARMHD620", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
+static struct platform_driver dmc620_pmu_driver = {
+	.driver	= {
+		.name		= DMC620_DRVNAME,
+		.acpi_match_table = dmc620_acpi_match,
+	},
+	.probe	= dmc620_pmu_device_probe,
+	.remove	= dmc620_pmu_device_remove,
+};
+
+static int __init dmc620_pmu_init(void)
+{
+	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      DMC620_DRVNAME,
+				      NULL,
+				      dmc620_pmu_cpu_teardown);
+	if (cpuhp_state_num < 0)
+		return cpuhp_state_num;
+
+	return platform_driver_register(&dmc620_pmu_driver);
+}
+
+static void __exit dmc620_pmu_exit(void)
+{
+	platform_driver_unregister(&dmc620_pmu_driver);
+	cpuhp_remove_multi_state(cpuhp_state_num);
+}
+
+module_init(dmc620_pmu_init);
+module_exit(dmc620_pmu_exit);
+
+MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
+MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com>");
+MODULE_LICENSE("GPL v2");
-- 
2.7.4


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-20 23:10 ` Tuan Phan
  0 siblings, 0 replies; 12+ messages in thread
From: Tuan Phan @ 2020-10-20 23:10 UTC (permalink / raw)
  Cc: mark.rutland, robin.murphy, linux-kernel, patches, Will Deacon,
	linux-arm-kernel

DMC-620 PMU supports a total of 10 counters, each of which is
independently programmable to different events and can
be started and stopped individually.

Currently, it supports ACPI only. Other platforms are welcome to test
and add support for device tree.

Usage example:
  #perf stat -e arm_dmc620_10008c000/clk_cycle_count/ -C 0
  Get perf event for clk_cycle_count counter.

  #perf stat -e arm_dmc620_10008c000/clkdiv2_allocate,mask=0x1f,match=0x2f,
  incr=2,invert=1/ -C 0
  The above example shows how to specify mask, match, incr,
  invert parameters for clkdiv2_allocate event.

Signed-off-by: Tuan Phan <tuanphan@os.amperecomputing.com>
---
Changes in v4:
- Addressed Robin's comments.

Changes in v3:
- Removed "_OFFSET" suffix.
- Renamed "affinity" to "irq".
- Have a better definition of group register.

Changes in v2:
- Removed IRQF_SHARED flag and added support for multiple
PMUs sharing the same interrupt.
- Fixed an interrupt handler race condition.

The ACPI binding spec for PMU DMC620 can be downloaded at:
https://developer.arm.com/documentation/den0093/c/

 drivers/perf/Kconfig          |   7 +
 drivers/perf/Makefile         |   1 +
 drivers/perf/arm_dmc620_pmu.c | 746 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 754 insertions(+)
 create mode 100644 drivers/perf/arm_dmc620_pmu.c

diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 130327ff..3075cf1 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -130,6 +130,13 @@ config ARM_SPE_PMU
 	  Extension, which provides periodic sampling of operations in
 	  the CPU pipeline and reports this via the perf AUX interface.
 
+config ARM_DMC620_PMU
+	tristate "Enable PMU support for the ARM DMC-620 memory controller"
+	depends on (ARM64 && ACPI) || COMPILE_TEST
+	help
+	  Support for PMU events monitoring on the ARM DMC-620 memory
+	  controller.
+
 source "drivers/perf/hisilicon/Kconfig"
 
 endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5365fd5..5260b11 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
 obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
new file mode 100644
index 0000000..d72cd54
--- /dev/null
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM DMC-620 memory controller PMU driver
+ *
+ * Copyright (C) 2020 Ampere Computing LLC.
+ */
+
+#define DMC620_PMUNAME		"arm_dmc620"
+#define DMC620_DRVNAME		DMC620_PMUNAME "_pmu"
+#define pr_fmt(fmt)		DMC620_DRVNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/refcount.h>
+
+#define DMC620_PA_SHIFT					12
+#define DMC620_CNT_INIT					0x80000000
+#define DMC620_CNT_MAX_PERIOD				0xffffffff
+#define DMC620_PMU_CLKDIV2_MAX_COUNTERS			8
+#define DMC620_PMU_CLK_MAX_COUNTERS			2
+#define DMC620_PMU_MAX_COUNTERS				\
+	(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
+
+/*
+ * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
+ * offsets are relative to that base.
+ *
+ * Each counter has a group of control/value registers, and the
+ * DMC620_PMU_COUNTERn offsets are within a counter group.
+ *
+ * The counter registers groups start at 0xA10.
+ */
+/*
+ * Overflow status registers hold one bit per counter, and the IRQ
+ * handler iterates them with for_each_set_bit(). The masks must
+ * therefore cover all counter bits: (NCOUNTERS - 1) is an index
+ * (0b111 / 0b1), not a bitmask, and would leave overflows on
+ * clkdiv2 counters 3-7 (and clk counter 1) unacknowledged.
+ */
+#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2		0x8
+#define  DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
+		GENMASK(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1, 0)
+#define DMC620_PMU_OVERFLOW_STATUS_CLK			0xC
+#define  DMC620_PMU_OVERFLOW_STATUS_CLK_MASK		\
+		GENMASK(DMC620_PMU_CLK_MAX_COUNTERS - 1, 0)
+#define DMC620_PMU_COUNTERS_BASE			0x10
+#define DMC620_PMU_COUNTERn_MASK_31_00			0x0
+#define DMC620_PMU_COUNTERn_MASK_63_32			0x4
+#define DMC620_PMU_COUNTERn_MATCH_31_00			0x8
+#define DMC620_PMU_COUNTERn_MATCH_63_32			0xC
+#define DMC620_PMU_COUNTERn_CONTROL			0x10
+#define  DMC620_PMU_COUNTERn_CONTROL_ENABLE		BIT(0)
+#define  DMC620_PMU_COUNTERn_CONTROL_INVERT		BIT(1)
+#define  DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX		GENMASK(6, 2)
+#define  DMC620_PMU_COUNTERn_CONTROL_INCR_MUX		GENMASK(8, 7)
+#define DMC620_PMU_COUNTERn_VALUE			0x20
+/* Offset of the registers for a given counter, relative to 0xA00 */
+#define DMC620_PMU_COUNTERn_OFFSET(n) \
+	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
+
+static LIST_HEAD(dmc620_pmu_irqs);
+static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+
+/*
+ * Per-interrupt-line bookkeeping. Several DMC-620 PMU instances may
+ * share one interrupt; they all link into pmus_node and the structure
+ * is refcounted by the number of attached PMUs.
+ */
+struct dmc620_pmu_irq {
+	struct hlist_node node;		/* cpuhp instance linkage */
+	struct list_head pmus_node;	/* PMUs sharing this interrupt (RCU list) */
+	struct list_head irqs_node;	/* entry in global dmc620_pmu_irqs list */
+	refcount_t refcount;		/* number of PMUs using this interrupt */
+	unsigned int irq_num;		/* Linux IRQ number */
+	unsigned int cpu;		/* CPU this IRQ (and its events) is bound to */
+};
+
+/* Driver state for one DMC-620 instance. */
+struct dmc620_pmu {
+	struct pmu pmu;			/* perf core interface */
+	struct platform_device *pdev;
+
+	void __iomem *base;		/* mapped PMU register window */
+	struct dmc620_pmu_irq *irq;	/* shared interrupt bookkeeping */
+	struct list_head pmus_node;	/* entry in irq->pmus_node */
+
+	/*
+	 * We put all clkdiv2 and clk counters to a same array.
+	 * The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
+	 * clkdiv2 counters, the last DMC620_PMU_CLK_MAX_COUNTERS
+	 * belong to clk counters.
+	 */
+	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
+	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
+};
+
+#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
+
+static int cpuhp_state_num;
+
+struct dmc620_pmu_event_attr {
+	struct device_attribute attr;
+	u8 clkdiv2;
+	u8 eventid;
+};
+
+/*
+ * sysfs "show" for an event attribute: prints the event id and which
+ * clock domain (clkdiv2 or clk) the event belongs to, in the format
+ * perf's event parser consumes.
+ */
+static ssize_t
+dmc620_pmu_event_show(struct device *dev,
+			   struct device_attribute *attr, char *page)
+{
+	struct dmc620_pmu_event_attr *eattr;
+
+	eattr = container_of(attr, typeof(*eattr), attr);
+
+	return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
+}
+
+#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)		\
+	(&((struct dmc620_pmu_event_attr[]) {{				\
+		.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL),	\
+		.clkdiv2 = _clkdiv2,						\
+		.eventid = _eventid,					\
+	}})[0].attr.attr)
+
+static struct attribute *dmc620_pmu_events_attrs[] = {
+	/* clkdiv2 events list */
+	DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
+	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
+
+	/* clk events list */
+	DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
+	DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
+	DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
+	NULL,
+};
+
+static struct attribute_group dmc620_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = dmc620_pmu_events_attrs,
+};
+
+/* User ABI */
+#define ATTR_CFG_FLD_mask_CFG		config
+#define ATTR_CFG_FLD_mask_LO		0
+#define ATTR_CFG_FLD_mask_HI		44
+#define ATTR_CFG_FLD_match_CFG		config1
+#define ATTR_CFG_FLD_match_LO		0
+#define ATTR_CFG_FLD_match_HI		44
+#define ATTR_CFG_FLD_invert_CFG		config2
+#define ATTR_CFG_FLD_invert_LO		0
+#define ATTR_CFG_FLD_invert_HI		0
+#define ATTR_CFG_FLD_incr_CFG		config2
+#define ATTR_CFG_FLD_incr_LO		1
+#define ATTR_CFG_FLD_incr_HI		2
+#define ATTR_CFG_FLD_event_CFG		config2
+#define ATTR_CFG_FLD_event_LO		3
+#define ATTR_CFG_FLD_event_HI		8
+#define ATTR_CFG_FLD_clkdiv2_CFG	config2
+#define ATTR_CFG_FLD_clkdiv2_LO		9
+#define ATTR_CFG_FLD_clkdiv2_HI		9
+
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
+	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
+	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name)				\
+	PMU_FORMAT_ATTR(name,					\
+	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,		\
+			     ATTR_CFG_FLD_##name##_LO,		\
+			     ATTR_CFG_FLD_##name##_HI))
+
+/*
+ * Extract bits [hi:lo] from a 64-bit perf_event_attr config field.
+ * GENMASK_ULL is required: mask/match span bits 0-44, and plain
+ * GENMASK is only BITS_PER_LONG wide, so GENMASK(44, 0) on a 32-bit
+ * COMPILE_TEST build is a negative shift (undefined behaviour) and
+ * would truncate the field. Arguments are parenthesized against
+ * expression operands.
+ */
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
+	((((attr)->cfg) >> (lo)) & GENMASK_ULL((hi) - (lo), 0))
+
+#define ATTR_CFG_GET_FLD(attr, name)				\
+	_ATTR_CFG_GET_FLD(attr,					\
+			  ATTR_CFG_FLD_##name##_CFG,		\
+			  ATTR_CFG_FLD_##name##_LO,		\
+			  ATTR_CFG_FLD_##name##_HI)
+
+GEN_PMU_FORMAT_ATTR(mask);
+GEN_PMU_FORMAT_ATTR(match);
+GEN_PMU_FORMAT_ATTR(invert);
+GEN_PMU_FORMAT_ATTR(incr);
+GEN_PMU_FORMAT_ATTR(event);
+GEN_PMU_FORMAT_ATTR(clkdiv2);
+
+static struct attribute *dmc620_pmu_formats_attrs[] = {
+	&format_attr_mask.attr,
+	&format_attr_match.attr,
+	&format_attr_invert.attr,
+	&format_attr_incr.attr,
+	&format_attr_event.attr,
+	&format_attr_clkdiv2.attr,
+	NULL,
+};
+
+static struct attribute_group dmc620_pmu_format_attr_group = {
+	.name	= "format",
+	.attrs	= dmc620_pmu_formats_attrs,
+};
+
+static const struct attribute_group *dmc620_pmu_attr_groups[] = {
+	&dmc620_pmu_events_attr_group,
+	&dmc620_pmu_format_attr_group,
+	NULL,
+};
+
+/*
+ * Read one register of counter @idx; @reg is the offset within the
+ * counter's register group (e.g. DMC620_PMU_COUNTERn_VALUE).
+ */
+static inline
+u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
+			unsigned int idx, unsigned int reg)
+{
+	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
+}
+
+/*
+ * Write one register of counter @idx; @reg is the offset within the
+ * counter's register group.
+ */
+static inline
+void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
+			unsigned int idx, unsigned int reg, u32 val)
+{
+	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
+}
+
+/*
+ * Build the COUNTERn_CONTROL value for @event from its user-supplied
+ * invert/incr/event config fields. The ENABLE bit is left clear; the
+ * caller sets it when it actually starts the counter.
+ */
+static
+unsigned int dmc620_event_to_counter_control(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	unsigned int reg = 0;
+
+	/*
+	 * FIELD_PREP, not FIELD_GET: the attr values are raw field values
+	 * that must be shifted *into* their register positions. FIELD_GET
+	 * would shift them the wrong way and zero out small values
+	 * (e.g. event=0x1 would land as 0 in EVENT_MUX).
+	 */
+	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INVERT,
+			ATTR_CFG_GET_FLD(attr, invert));
+	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
+			ATTR_CFG_GET_FLD(attr, incr));
+	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
+			ATTR_CFG_GET_FLD(attr, event));
+
+	return reg;
+}
+
+/*
+ * Claim a free hardware counter for @event. clkdiv2 events may only
+ * use counters [0, DMC620_PMU_CLKDIV2_MAX_COUNTERS); clk events use
+ * the remaining ones. Returns the claimed index, or -EAGAIN when all
+ * suitable counters are busy.
+ */
+static int dmc620_get_event_idx(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	bool is_clkdiv2 = ATTR_CFG_GET_FLD(&event->attr, clkdiv2);
+	int first, limit, i;
+
+	first = is_clkdiv2 ? 0 : DMC620_PMU_CLKDIV2_MAX_COUNTERS;
+	limit = is_clkdiv2 ? DMC620_PMU_CLKDIV2_MAX_COUNTERS :
+			     DMC620_PMU_MAX_COUNTERS;
+
+	/* test_and_set_bit makes the claim atomic w.r.t. concurrent adds */
+	for (i = first; i < limit; i++)
+		if (!test_and_set_bit(i, dmc620_pmu->used_mask))
+			return i;
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+/* Read the current 32-bit hardware value of the event's counter. */
+static u64 dmc620_pmu_read_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	return dmc620_pmu_creg_read(dmc620_pmu,
+				    event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
+}
+
+/*
+ * Fold the counter's progress since the last snapshot into
+ * event->count. The cmpxchg loop makes this safe against a concurrent
+ * update from the overflow IRQ handler: whoever wins the cmpxchg
+ * accounts the delta, the loser retries with the new prev_count.
+ */
+static void dmc620_pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_count, new_count;
+
+	do {
+		/* We may also be called from the irq handler */
+		prev_count = local64_read(&hwc->prev_count);
+		new_count = dmc620_pmu_read_counter(event);
+	} while (local64_cmpxchg(&hwc->prev_count,
+			prev_count, new_count) != prev_count);
+	/* Counter is 32 bits wide; mask handles wraparound of new - prev */
+	delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
+	local64_add(delta, &event->count);
+}
+
+/*
+ * Re-arm the counter at half its range (DMC620_CNT_INIT = 2^31) so
+ * the next overflow interrupt fires after 2^31 increments, leaving
+ * headroom for event_update() to catch up without losing a wrap.
+ */
+static void dmc620_pmu_event_set_period(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
+}
+
+/*
+ * Program the event's mask/match filters and control fields into its
+ * hardware counter, then enable it. The control write goes last so
+ * the filters are in place before counting starts.
+ */
+static void dmc620_pmu_enable_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct perf_event_attr *attr = &event->attr;
+	u64 reg;
+
+	/* 45-bit mask value, split across two 32-bit registers */
+	reg = ATTR_CFG_GET_FLD(attr, mask);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
+
+	/* 45-bit match value, likewise split */
+	reg = ATTR_CFG_GET_FLD(attr, match);
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));
+
+	reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, (u32)reg);
+}
+
+/* Stop the event's counter by clearing its whole control register. */
+static void dmc620_pmu_disable_counter(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+
+	dmc620_pmu_creg_write(dmc620_pmu,
+			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
+}
+
+/*
+ * Shared overflow interrupt handler. Walks every PMU attached to this
+ * interrupt line (RCU list), services overflowed counters, and
+ * re-enables whatever pmu::stop has not stopped.
+ */
+static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
+{
+	struct dmc620_pmu_irq *irq = data;
+	struct dmc620_pmu *dmc620_pmu;
+	irqreturn_t ret = IRQ_NONE;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
+		unsigned long status;
+		struct perf_event *event;
+		unsigned int idx;
+
+		/*
+		 * HW doesn't provide a control to atomically disable all counters.
+		 * To prevent race condition, disable all events before continuing
+		 */
+		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
+			event = dmc620_pmu->events[idx];
+			if (!event)
+				continue;
+			dmc620_pmu_disable_counter(event);
+		}
+
+		/* Combine both status registers: clk bits above the clkdiv2 bits */
+		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
+		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
+				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
+
+		/*
+		 * Must NOT "continue" when status == 0: the counters were
+		 * disabled just above and still need re-enabling below, even
+		 * if this particular PMU did not raise the interrupt.
+		 */
+		if (status) {
+			for_each_set_bit(idx, &status,
+					 DMC620_PMU_MAX_COUNTERS) {
+				event = dmc620_pmu->events[idx];
+				if (WARN_ON_ONCE(!event))
+					continue;
+				dmc620_pmu_event_update(event);
+				dmc620_pmu_event_set_period(event);
+			}
+
+			/* Acknowledge the overflows we just serviced */
+			if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
+				writel(0, dmc620_pmu->base +
+				       DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
+
+			if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
+			    DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
+				writel(0, dmc620_pmu->base +
+				       DMC620_PMU_OVERFLOW_STATUS_CLK);
+
+			ret = IRQ_HANDLED;
+		}
+
+		/* Restart everything that pmu::stop hasn't explicitly stopped */
+		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
+			event = dmc620_pmu->events[idx];
+			if (!event)
+				continue;
+			if (!(event->hw.state & PERF_HES_STOPPED))
+				dmc620_pmu_enable_counter(event);
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/*
+ * Look up (and take a reference on) the bookkeeping for @irq_num, or
+ * create it if this is the first PMU on that line. Caller must hold
+ * dmc620_pmu_irqs_lock.
+ */
+static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
+{
+	struct dmc620_pmu_irq *irq;
+	int ret;
+
+	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
+		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
+			return irq;
+
+	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+	if (!irq)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&irq->pmus_node);
+
+	/* Pick one CPU to be the preferred one to use */
+	irq->cpu = raw_smp_processor_id();
+	refcount_set(&irq->refcount, 1);
+
+	ret = request_irq(irq_num, dmc620_pmu_handle_irq,
+			  IRQF_NOBALANCING | IRQF_NO_THREAD,
+			  "dmc620-pmu", irq);
+	if (ret)
+		goto out_free_aff;
+
+	ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+	if (ret)
+		goto out_free_irq;
+
+	ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
+	if (ret)
+		goto out_free_irq;
+
+	/* Only published on full success, so lookups never see a half-built entry */
+	irq->irq_num = irq_num;
+	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
+
+	return irq;
+
+out_free_irq:
+	/*
+	 * NOTE(review): the affinity hint set above is not cleared before
+	 * free_irq() here — confirm free_irq() doesn't warn about a stale
+	 * hint on this error path.
+	 */
+	free_irq(irq_num, irq);
+out_free_aff:
+	kfree(irq);
+	return ERR_PTR(ret);
+}
+
+/*
+ * Attach @dmc620_pmu to the (possibly shared) interrupt @irq_num and
+ * link it into the interrupt's RCU list of PMUs. Pairs with
+ * dmc620_pmu_put_irq().
+ */
+static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
+{
+	struct dmc620_pmu_irq *irq;
+
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	irq = __dmc620_pmu_get_irq(irq_num);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	if (IS_ERR(irq))
+		return PTR_ERR(irq);
+
+	dmc620_pmu->irq = irq;
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	return 0;
+}
+
+/*
+ * Detach @dmc620_pmu from its interrupt; when the last user drops off,
+ * tear the interrupt bookkeeping down entirely.
+ */
+static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
+{
+	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
+
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_del_rcu(&dmc620_pmu->pmus_node);
+
+	if (!refcount_dec_and_test(&irq->refcount)) {
+		mutex_unlock(&dmc620_pmu_irqs_lock);
+		return;
+	}
+
+	list_del(&irq->irqs_node);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	/*
+	 * NOTE(review): the affinity hint set in __dmc620_pmu_get_irq() is
+	 * not cleared (irq_set_affinity_hint(irq_num, NULL)) before
+	 * free_irq() — verify this doesn't trigger a warning in free_irq().
+	 */
+	free_irq(irq->irq_num, irq);
+	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
+	kfree(irq);
+}
+
+/*
+ * pmu::event_init - validate @event for this uncore PMU. Rejects
+ * sampling and per-task events, forces the event onto the interrupt's
+ * chosen CPU, and forbids mixed-PMU groups (software events excepted).
+ */
+static int dmc620_pmu_event_init(struct perf_event *event)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	struct perf_event *sibling;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/*
+	 * DMC 620 PMUs are shared across all cpus and cannot
+	 * support task bound and sampling events.
+	 */
+	if (is_sampling_event(event) ||
+		event->attach_state & PERF_ATTACH_TASK) {
+		dev_dbg(dmc620_pmu->pmu.dev,
+			"Can't support per-task counters\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Many perf core operations (eg. events rotation) operate on a
+	 * single CPU context. This is obvious for CPU PMUs, where one
+	 * expects the same sets of events being observed on all CPUs,
+	 * but can lead to issues for off-core PMUs, where each
+	 * event could be theoretically assigned to a different CPU. To
+	 * mitigate this, we enforce CPU assignment to one, selected
+	 * processor.
+	 */
+	event->cpu = dmc620_pmu->irq->cpu;
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	/*
+	 * We must NOT create groups containing mixed PMUs, although software
+	 * events are acceptable.
+	 */
+	if (event->group_leader->pmu != event->pmu &&
+			!is_software_event(event->group_leader))
+		return -EINVAL;
+
+	for_each_sibling_event(sibling, event->group_leader) {
+		if (sibling->pmu != event->pmu &&
+				!is_software_event(sibling))
+			return -EINVAL;
+	}
+
+	/* No counter assigned yet; dmc620_pmu_add() will pick one */
+	hwc->idx = -1;
+	return 0;
+}
+
+/* pmu::read - fold the hardware counter into event->count. */
+static void dmc620_pmu_read(struct perf_event *event)
+{
+	dmc620_pmu_event_update(event);
+}
+
+/*
+ * pmu::start - re-arm and enable the event's counter. @flags
+ * (PERF_EF_RELOAD) is ignored: the period is always reprogrammed.
+ */
+static void dmc620_pmu_start(struct perf_event *event, int flags)
+{
+	event->hw.state = 0;
+	dmc620_pmu_event_set_period(event);
+	dmc620_pmu_enable_counter(event);
+}
+
+/*
+ * pmu::stop - disable the counter and bring event->count up to date.
+ * Idempotent: a second call on a stopped event is a no-op.
+ */
+static void dmc620_pmu_stop(struct perf_event *event, int flags)
+{
+	if (event->hw.state & PERF_HES_STOPPED)
+		return;
+
+	dmc620_pmu_disable_counter(event);
+	dmc620_pmu_event_update(event);
+	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+/*
+ * pmu::add - claim a hardware counter for @event and optionally start
+ * counting. Returns 0 on success, or -EAGAIN when every suitable
+ * counter is already in use.
+ */
+static int dmc620_pmu_add(struct perf_event *event, int flags)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	int counter = dmc620_get_event_idx(event);
+
+	if (counter < 0)
+		return counter;
+
+	event->hw.idx = counter;
+	dmc620_pmu->events[counter] = event;
+	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+	if (flags & PERF_EF_START)
+		dmc620_pmu_start(event, PERF_EF_RELOAD);
+
+	perf_event_update_userpage(event);
+	return 0;
+}
+
+/*
+ * pmu::del - stop the event and release its counter back to the
+ * allocation bitmap.
+ */
+static void dmc620_pmu_del(struct perf_event *event, int flags)
+{
+	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	dmc620_pmu_stop(event, PERF_EF_UPDATE);
+	dmc620_pmu->events[idx] = NULL;
+	clear_bit(idx, dmc620_pmu->used_mask);
+	perf_event_update_userpage(event);
+}
+
+/*
+ * CPU-hotplug teardown callback: if the CPU our interrupt (and all its
+ * PMUs' events) is bound to goes offline, migrate everything to a
+ * surviving CPU and re-point the IRQ affinity there.
+ */
+static int dmc620_pmu_cpu_teardown(unsigned int cpu,
+				   struct hlist_node *node)
+{
+	struct dmc620_pmu_irq *irq;
+	struct dmc620_pmu *dmc620_pmu;
+	unsigned int target;
+
+	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
+	if (cpu != irq->cpu)
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	/* We're only reading, but this isn't the place to be involving RCU */
+	mutex_lock(&dmc620_pmu_irqs_lock);
+	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
+		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
+	mutex_unlock(&dmc620_pmu_irqs_lock);
+
+	WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+	irq->cpu = target;
+
+	return 0;
+}
+
+/*
+ * Probe one DMC-620 instance: map its PMU registers, quiesce all
+ * counters, hook up the (possibly shared) overflow interrupt and
+ * register the PMU with perf under a name derived from its MMIO base.
+ */
+static int dmc620_pmu_device_probe(struct platform_device *pdev)
+{
+	struct dmc620_pmu *dmc620_pmu;
+	struct resource *res;
+	char *name;
+	int irq_num;
+	int i, ret;
+
+	dmc620_pmu = devm_kzalloc(&pdev->dev,
+			sizeof(struct dmc620_pmu), GFP_KERNEL);
+	if (!dmc620_pmu)
+		return -ENOMEM;
+
+	dmc620_pmu->pdev = pdev;
+	platform_set_drvdata(pdev, dmc620_pmu);
+
+	dmc620_pmu->pmu = (struct pmu) {
+		.module = THIS_MODULE,
+		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= dmc620_pmu_event_init,
+		.add		= dmc620_pmu_add,
+		.del		= dmc620_pmu_del,
+		.start		= dmc620_pmu_start,
+		.stop		= dmc620_pmu_stop,
+		.read		= dmc620_pmu_read,
+		.attr_groups	= dmc620_pmu_attr_groups,
+	};
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmc620_pmu->base))
+		return PTR_ERR(dmc620_pmu->base);
+
+	/* Make sure device is reset before enabling interrupt */
+	for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
+		dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
+
+	irq_num = platform_get_irq(pdev, 0);
+	if (irq_num < 0)
+		return irq_num;
+
+	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
+	if (ret)
+		return ret;
+
+	/*
+	 * resource_size_t is 32-bit on some configurations; cast to u64
+	 * so it always matches the "%llx" specifier.
+	 */
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+				  "%s_%llx", DMC620_PMUNAME,
+				  (u64)(res->start >> DMC620_PA_SHIFT));
+	if (!name) {
+		dev_err(&pdev->dev,
+			  "Create name failed, PMU @%pa\n", &res->start);
+		/* Must not fall through with ret == 0 from get_irq above */
+		ret = -ENOMEM;
+		goto out_teardown_dev;
+	}
+
+	ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
+	if (ret)
+		goto out_teardown_dev;
+
+	return 0;
+
+out_teardown_dev:
+	dmc620_pmu_put_irq(dmc620_pmu);
+	synchronize_rcu();
+	return ret;
+}
+
+/*
+ * Remove one DMC-620 instance: drop off the shared interrupt first so
+ * the handler can no longer reach this PMU, then unregister from perf.
+ */
+static int dmc620_pmu_device_remove(struct platform_device *pdev)
+{
+	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
+
+	dmc620_pmu_put_irq(dmc620_pmu);
+
+	/* perf will synchronise RCU before devres can free dmc620_pmu */
+	perf_pmu_unregister(&dmc620_pmu->pmu);
+
+	return 0;
+}
+
+static const struct acpi_device_id dmc620_acpi_match[] = {
+	{ "ARMHD620", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
+static struct platform_driver dmc620_pmu_driver = {
+	.driver	= {
+		.name		= DMC620_DRVNAME,
+		.acpi_match_table = dmc620_acpi_match,
+	},
+	.probe	= dmc620_pmu_device_probe,
+	.remove	= dmc620_pmu_device_remove,
+};
+
+/* Module init: set up the hotplug state, then register the driver. */
+static int __init dmc620_pmu_init(void)
+{
+	int ret;
+
+	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      DMC620_DRVNAME,
+				      NULL,
+				      dmc620_pmu_cpu_teardown);
+	if (cpuhp_state_num < 0)
+		return cpuhp_state_num;
+
+	ret = platform_driver_register(&dmc620_pmu_driver);
+	if (ret)
+		/* Don't leak the dynamic hotplug state on failure */
+		cpuhp_remove_multi_state(cpuhp_state_num);
+
+	return ret;
+}
+
+/* Module exit: unregister the driver and release the hotplug state. */
+static void __exit dmc620_pmu_exit(void)
+{
+	platform_driver_unregister(&dmc620_pmu_driver);
+	cpuhp_remove_multi_state(cpuhp_state_num);
+}
+
+module_init(dmc620_pmu_init);
+module_exit(dmc620_pmu_exit);
+
+MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
+/* Fixed: author string was missing the closing '>' of the email address */
+MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com>");
+MODULE_LICENSE("GPL v2");
-- 
2.7.4


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
  2020-10-20 23:10 ` Tuan Phan
@ 2020-10-21 10:38   ` kernel test robot
  -1 siblings, 0 replies; 12+ messages in thread
From: kernel test robot @ 2020-10-21 10:38 UTC (permalink / raw)
  To: Tuan Phan
  Cc: kbuild-all, patches, mark.rutland, robin.murphy, Will Deacon,
	linux-kernel, linux-arm-kernel

[-- Attachment #1: Type: text/plain, Size: 6182 bytes --]

Hi Tuan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.9 next-20201021]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Tuan-Phan/driver-perf-Add-PMU-driver-for-the-ARM-DMC-620-memory-controller/20201021-071241
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git c4d6fe7311762f2e03b3c27ad38df7c40c80cc93
config: arm-allyesconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/95212570823403300bcf4fa4e38de366eeeee60c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Tuan-Phan/driver-perf-Add-PMU-driver-for-the-ARM-DMC-620-memory-controller/20201021-071241
        git checkout 95212570823403300bcf4fa4e38de366eeeee60c
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   In file included from include/linux/ioport.h:15,
                    from include/linux/acpi.h:12,
                    from drivers/perf/arm_dmc620_pmu.c:12:
   drivers/perf/arm_dmc620_pmu.c: In function 'dmc620_pmu_enable_counter':
   include/linux/bits.h:36:11: warning: right shift count is negative [-Wshift-count-negative]
      36 |   (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
         |           ^~
   include/linux/bits.h:38:31: note: in expansion of macro '__GENMASK'
      38 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
         |                               ^~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:202:27: note: in expansion of macro 'GENMASK'
     202 |  ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
         |                           ^~~~~~~
   drivers/perf/arm_dmc620_pmu.c:205:2: note: in expansion of macro '_ATTR_CFG_GET_FLD'
     205 |  _ATTR_CFG_GET_FLD(attr,     \
         |  ^~~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:328:8: note: in expansion of macro 'ATTR_CFG_GET_FLD'
     328 |  reg = ATTR_CFG_GET_FLD(attr, mask);
         |        ^~~~~~~~~~~~~~~~
   include/linux/bits.h:36:11: warning: right shift count is negative [-Wshift-count-negative]
      36 |   (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
         |           ^~
   include/linux/bits.h:38:31: note: in expansion of macro '__GENMASK'
      38 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
         |                               ^~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:202:27: note: in expansion of macro 'GENMASK'
     202 |  ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
         |                           ^~~~~~~
   drivers/perf/arm_dmc620_pmu.c:205:2: note: in expansion of macro '_ATTR_CFG_GET_FLD'
     205 |  _ATTR_CFG_GET_FLD(attr,     \
         |  ^~~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:334:8: note: in expansion of macro 'ATTR_CFG_GET_FLD'
     334 |  reg = ATTR_CFG_GET_FLD(attr, match);
         |        ^~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c: In function 'dmc620_pmu_device_probe':
>> drivers/perf/arm_dmc620_pmu.c:677:14: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 5 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=]
     677 |       "%s_%llx", DMC620_PMUNAME,
         |           ~~~^
         |              |
         |              long long unsigned int
         |           %x

vim +677 drivers/perf/arm_dmc620_pmu.c

   629	
   630	static int dmc620_pmu_device_probe(struct platform_device *pdev)
   631	{
   632		struct dmc620_pmu *dmc620_pmu;
   633		struct resource *res;
   634		char *name;
   635		int irq_num;
   636		int i, ret;
   637	
   638		dmc620_pmu = devm_kzalloc(&pdev->dev,
   639				sizeof(struct dmc620_pmu), GFP_KERNEL);
   640		if (!dmc620_pmu)
   641			return -ENOMEM;
   642	
   643		dmc620_pmu->pdev = pdev;
   644		platform_set_drvdata(pdev, dmc620_pmu);
   645	
   646		dmc620_pmu->pmu = (struct pmu) {
   647			.module = THIS_MODULE,
   648			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
   649			.task_ctx_nr	= perf_invalid_context,
   650			.event_init	= dmc620_pmu_event_init,
   651			.add		= dmc620_pmu_add,
   652			.del		= dmc620_pmu_del,
   653			.start		= dmc620_pmu_start,
   654			.stop		= dmc620_pmu_stop,
   655			.read		= dmc620_pmu_read,
   656			.attr_groups	= dmc620_pmu_attr_groups,
   657		};
   658	
   659		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   660		dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
   661		if (IS_ERR(dmc620_pmu->base))
   662			return PTR_ERR(dmc620_pmu->base);
   663	
   664		/* Make sure device is reset before enabling interrupt */
   665		for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
   666			dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
   667	
   668		irq_num = platform_get_irq(pdev, 0);
   669		if (irq_num < 0)
   670			return irq_num;
   671	
   672		ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
   673		if (ret)
   674			return ret;
   675	
   676		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
 > 677					  "%s_%llx", DMC620_PMUNAME,
   678					  (res->start) >> DMC620_PA_SHIFT);
   679		if (!name) {
   680			dev_err(&pdev->dev,
   681				  "Create name failed, PMU @%pa\n", &res->start);
   682			goto out_teardown_dev;
   683		}
   684	
   685		ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
   686		if (ret)
   687			goto out_teardown_dev;
   688	
   689		return 0;
   690	
   691	out_teardown_dev:
   692		dmc620_pmu_put_irq(dmc620_pmu);
   693		synchronize_rcu();
   694		return ret;
   695	}
   696	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 77015 bytes --]

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-21 10:38   ` kernel test robot
  0 siblings, 0 replies; 12+ messages in thread
From: kernel test robot @ 2020-10-21 10:38 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 6329 bytes --]

Hi Tuan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.9 next-20201021]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Tuan-Phan/driver-perf-Add-PMU-driver-for-the-ARM-DMC-620-memory-controller/20201021-071241
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git c4d6fe7311762f2e03b3c27ad38df7c40c80cc93
config: arm-allyesconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/95212570823403300bcf4fa4e38de366eeeee60c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Tuan-Phan/driver-perf-Add-PMU-driver-for-the-ARM-DMC-620-memory-controller/20201021-071241
        git checkout 95212570823403300bcf4fa4e38de366eeeee60c
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   In file included from include/linux/ioport.h:15,
                    from include/linux/acpi.h:12,
                    from drivers/perf/arm_dmc620_pmu.c:12:
   drivers/perf/arm_dmc620_pmu.c: In function 'dmc620_pmu_enable_counter':
   include/linux/bits.h:36:11: warning: right shift count is negative [-Wshift-count-negative]
      36 |   (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
         |           ^~
   include/linux/bits.h:38:31: note: in expansion of macro '__GENMASK'
      38 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
         |                               ^~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:202:27: note: in expansion of macro 'GENMASK'
     202 |  ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
         |                           ^~~~~~~
   drivers/perf/arm_dmc620_pmu.c:205:2: note: in expansion of macro '_ATTR_CFG_GET_FLD'
     205 |  _ATTR_CFG_GET_FLD(attr,     \
         |  ^~~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:328:8: note: in expansion of macro 'ATTR_CFG_GET_FLD'
     328 |  reg = ATTR_CFG_GET_FLD(attr, mask);
         |        ^~~~~~~~~~~~~~~~
   include/linux/bits.h:36:11: warning: right shift count is negative [-Wshift-count-negative]
      36 |   (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
         |           ^~
   include/linux/bits.h:38:31: note: in expansion of macro '__GENMASK'
      38 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
         |                               ^~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:202:27: note: in expansion of macro 'GENMASK'
     202 |  ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
         |                           ^~~~~~~
   drivers/perf/arm_dmc620_pmu.c:205:2: note: in expansion of macro '_ATTR_CFG_GET_FLD'
     205 |  _ATTR_CFG_GET_FLD(attr,     \
         |  ^~~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c:334:8: note: in expansion of macro 'ATTR_CFG_GET_FLD'
     334 |  reg = ATTR_CFG_GET_FLD(attr, match);
         |        ^~~~~~~~~~~~~~~~
   drivers/perf/arm_dmc620_pmu.c: In function 'dmc620_pmu_device_probe':
>> drivers/perf/arm_dmc620_pmu.c:677:14: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 5 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=]
     677 |       "%s_%llx", DMC620_PMUNAME,
         |           ~~~^
         |              |
         |              long long unsigned int
         |           %x

vim +677 drivers/perf/arm_dmc620_pmu.c

   629	
   630	static int dmc620_pmu_device_probe(struct platform_device *pdev)
   631	{
   632		struct dmc620_pmu *dmc620_pmu;
   633		struct resource *res;
   634		char *name;
   635		int irq_num;
   636		int i, ret;
   637	
   638		dmc620_pmu = devm_kzalloc(&pdev->dev,
   639				sizeof(struct dmc620_pmu), GFP_KERNEL);
   640		if (!dmc620_pmu)
   641			return -ENOMEM;
   642	
   643		dmc620_pmu->pdev = pdev;
   644		platform_set_drvdata(pdev, dmc620_pmu);
   645	
   646		dmc620_pmu->pmu = (struct pmu) {
   647			.module = THIS_MODULE,
   648			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
   649			.task_ctx_nr	= perf_invalid_context,
   650			.event_init	= dmc620_pmu_event_init,
   651			.add		= dmc620_pmu_add,
   652			.del		= dmc620_pmu_del,
   653			.start		= dmc620_pmu_start,
   654			.stop		= dmc620_pmu_stop,
   655			.read		= dmc620_pmu_read,
   656			.attr_groups	= dmc620_pmu_attr_groups,
   657		};
   658	
   659		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   660		dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
   661		if (IS_ERR(dmc620_pmu->base))
   662			return PTR_ERR(dmc620_pmu->base);
   663	
   664		/* Make sure device is reset before enabling interrupt */
   665		for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
   666			dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
   667	
   668		irq_num = platform_get_irq(pdev, 0);
   669		if (irq_num < 0)
   670			return irq_num;
   671	
   672		ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
   673		if (ret)
   674			return ret;
   675	
   676		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
 > 677					  "%s_%llx", DMC620_PMUNAME,
   678					  (res->start) >> DMC620_PA_SHIFT);
   679		if (!name) {
   680			dev_err(&pdev->dev,
   681				  "Create name failed, PMU @%pa\n", &res->start);
   682			goto out_teardown_dev;
   683		}
   684	
   685		ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
   686		if (ret)
   687			goto out_teardown_dev;
   688	
   689		return 0;
   690	
   691	out_teardown_dev:
   692		dmc620_pmu_put_irq(dmc620_pmu);
   693		synchronize_rcu();
   694		return ret;
   695	}
   696	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 77015 bytes --]

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
  2020-10-20 23:10 ` Tuan Phan
@ 2020-10-22 17:34   ` Robin Murphy
  -1 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-22 17:34 UTC (permalink / raw)
  To: Tuan Phan
  Cc: patches, mark.rutland, Will Deacon, linux-kernel, linux-arm-kernel

On 2020-10-21 00:10, Tuan Phan wrote:
> DMC-620 PMU supports total 10 counters which each is
> independently programmable to different events and can
> be started and stopped individually.
> 
> Currently, it only supports ACPI. Other platforms feel free to test and add
> support for device tree.
> 
> Usage example:
>    #perf stat -e arm_dmc620_10008c000/clk_cycle_count/ -C 0
>    Get perf event for clk_cycle_count counter.
> 
>    #perf stat -e arm_dmc620_10008c000/clkdiv2_allocate,mask=0x1f,match=0x2f,
>    incr=2,invert=1/ -C 0
>    The above example shows how to specify mask, match, incr,
>    invert parameters for clkdiv2_allocate event.
> 
> Signed-off-by: Tuan Phan <tuanphan@os.amperecomputing.com>
> ---
> Changes in v4:
> - Addressed Robin's comments.
> 
> Changes in v3:
> - Removed "_OFFSET" suffix.
> - Renamed "affinity" to "irq".
> - Have a better definition of group register.
> 
> Changes in v2:
> - Removed IRQF_SHARED flag and added support for multiple
> PMUs sharing the same interrupt.
> - Fixed an interrupt handler race condition.
> 
> The ACPI binding spec for PMU DMC620 can be downloaded at:
> https://developer.arm.com/documentation/den0093/c/
> 
>   drivers/perf/Kconfig          |   7 +
>   drivers/perf/Makefile         |   1 +
>   drivers/perf/arm_dmc620_pmu.c | 746 ++++++++++++++++++++++++++++++++++++++++++
>   3 files changed, 754 insertions(+)
>   create mode 100644 drivers/perf/arm_dmc620_pmu.c
> 
> diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> index 130327ff..3075cf1 100644
> --- a/drivers/perf/Kconfig
> +++ b/drivers/perf/Kconfig
> @@ -130,6 +130,13 @@ config ARM_SPE_PMU
>   	  Extension, which provides periodic sampling of operations in
>   	  the CPU pipeline and reports this via the perf AUX interface.
>   
> +config ARM_DMC620_PMU
> +	tristate "Enable PMU support for the ARM DMC-620 memory controller"
> +	depends on (ARM64 && ACPI) || COMPILE_TEST
> +	help
> +	  Support for PMU events monitoring on the ARM DMC-620 memory
> +	  controller.
> +
>   source "drivers/perf/hisilicon/Kconfig"
>   
>   endmenu
> diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
> index 5365fd5..5260b11 100644
> --- a/drivers/perf/Makefile
> +++ b/drivers/perf/Makefile
> @@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
>   obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
>   obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
>   obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
> +obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
> diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
> new file mode 100644
> index 0000000..d72cd54
> --- /dev/null
> +++ b/drivers/perf/arm_dmc620_pmu.c
> @@ -0,0 +1,746 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * ARM DMC-620 memory controller PMU driver
> + *
> + * Copyright (C) 2020 Ampere Computing LLC.
> + */
> +
> +#define DMC620_PMUNAME		"arm_dmc620"
> +#define DMC620_DRVNAME		DMC620_PMUNAME "_pmu"
> +#define pr_fmt(fmt)		DMC620_DRVNAME ": " fmt
> +
> +#include <linux/acpi.h>
> +#include <linux/bitfield.h>
> +#include <linux/bitops.h>
> +#include <linux/cpuhotplug.h>
> +#include <linux/cpumask.h>
> +#include <linux/device.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/kernel.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/mutex.h>
> +#include <linux/perf_event.h>
> +#include <linux/perf/arm_pmu.h>
> +#include <linux/platform_device.h>
> +#include <linux/printk.h>
> +#include <linux/rculist.h>
> +#include <linux/refcount.h>
> +
> +#define DMC620_PA_SHIFT					12
> +#define DMC620_CNT_INIT					0x80000000
> +#define DMC620_CNT_MAX_PERIOD				0xffffffff
> +#define DMC620_PMU_CLKDIV2_MAX_COUNTERS			8
> +#define DMC620_PMU_CLK_MAX_COUNTERS			2
> +#define DMC620_PMU_MAX_COUNTERS				\
> +	(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
> +
> +/*
> + * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
> + * offsets are relative to that base.
> + *
> + * Each counter has a group of control/value registers, and the
> + * DMC620_PMU_COUNTERn offsets are within a counter group.
> + *
> + * The counter registers groups start at 0xA10.
> + */
> +#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2		0x8
> +#define  DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
> +		(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
> +#define DMC620_PMU_OVERFLOW_STATUS_CLK			0xC
> +#define  DMC620_PMU_OVERFLOW_STATUS_CLK_MASK		\
> +		(DMC620_PMU_CLK_MAX_COUNTERS - 1)
> +#define DMC620_PMU_COUNTERS_BASE			0x10
> +#define DMC620_PMU_COUNTERn_MASK_31_00			0x0
> +#define DMC620_PMU_COUNTERn_MASK_63_32			0x4
> +#define DMC620_PMU_COUNTERn_MATCH_31_00			0x8
> +#define DMC620_PMU_COUNTERn_MATCH_63_32			0xC
> +#define DMC620_PMU_COUNTERn_CONTROL			0x10
> +#define  DMC620_PMU_COUNTERn_CONTROL_ENABLE		BIT(0)
> +#define  DMC620_PMU_COUNTERn_CONTROL_INVERT		BIT(1)
> +#define  DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX		GENMASK(6, 2)
> +#define  DMC620_PMU_COUNTERn_CONTROL_INCR_MUX		GENMASK(8, 7)
> +#define DMC620_PMU_COUNTERn_VALUE			0x20
> +/* Offset of the registers for a given counter, relative to 0xA00 */
> +#define DMC620_PMU_COUNTERn_OFFSET(n) \
> +	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
> +
> +static LIST_HEAD(dmc620_pmu_irqs);
> +static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
> +
> +struct dmc620_pmu_irq {
> +	struct hlist_node node;
> +	struct list_head pmus_node;
> +	struct list_head irqs_node;
> +	refcount_t refcount;
> +	unsigned int irq_num;
> +	unsigned int cpu;
> +};
> +
> +struct dmc620_pmu {
> +	struct pmu pmu;
> +	struct platform_device *pdev;
> +
> +	void __iomem *base;
> +	struct dmc620_pmu_irq *irq;
> +	struct list_head pmus_node;
> +
> +	/*
> +	 * We put all clkdiv2 and clk counters to a same array.
> +	 * The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
> +	 * clkdiv2 counters, the last DMC620_PMU_CLK_MAX_COUNTERS
> +	 * belong to clk counters.
> +	 */
> +	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
> +	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
> +};
> +
> +#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
> +
> +static int cpuhp_state_num;
> +
> +struct dmc620_pmu_event_attr {
> +	struct device_attribute attr;
> +	u8 clkdiv2;
> +	u8 eventid;
> +};
> +
> +static ssize_t
> +dmc620_pmu_event_show(struct device *dev,
> +			   struct device_attribute *attr, char *page)
> +{
> +	struct dmc620_pmu_event_attr *eattr;
> +
> +	eattr = container_of(attr, typeof(*eattr), attr);
> +
> +	return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
> +}
> +
> +#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)		\
> +	(&((struct dmc620_pmu_event_attr[]) {{				\
> +		.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL),	\
> +		.clkdiv2 = _clkdiv2,						\
> +		.eventid = _eventid,					\
> +	}})[0].attr.attr)
> +
> +static struct attribute *dmc620_pmu_events_attrs[] = {
> +	/* clkdiv2 events list */
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
> +
> +	/* clk events list */
> +	DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
> +	DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
> +	DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
> +	NULL,
> +};
> +
> +static struct attribute_group dmc620_pmu_events_attr_group = {
> +	.name = "events",
> +	.attrs = dmc620_pmu_events_attrs,
> +};
> +
> +/* User ABI */
> +#define ATTR_CFG_FLD_mask_CFG		config
> +#define ATTR_CFG_FLD_mask_LO		0
> +#define ATTR_CFG_FLD_mask_HI		44
> +#define ATTR_CFG_FLD_match_CFG		config1
> +#define ATTR_CFG_FLD_match_LO		0
> +#define ATTR_CFG_FLD_match_HI		44
> +#define ATTR_CFG_FLD_invert_CFG		config2
> +#define ATTR_CFG_FLD_invert_LO		0
> +#define ATTR_CFG_FLD_invert_HI		0
> +#define ATTR_CFG_FLD_incr_CFG		config2
> +#define ATTR_CFG_FLD_incr_LO		1
> +#define ATTR_CFG_FLD_incr_HI		2
> +#define ATTR_CFG_FLD_event_CFG		config2
> +#define ATTR_CFG_FLD_event_LO		3
> +#define ATTR_CFG_FLD_event_HI		8
> +#define ATTR_CFG_FLD_clkdiv2_CFG	config2
> +#define ATTR_CFG_FLD_clkdiv2_LO		9
> +#define ATTR_CFG_FLD_clkdiv2_HI		9
> +
> +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
> +	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
> +
> +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
> +	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
> +
> +#define GEN_PMU_FORMAT_ATTR(name)				\
> +	PMU_FORMAT_ATTR(name,					\
> +	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,		\
> +			     ATTR_CFG_FLD_##name##_LO,		\
> +			     ATTR_CFG_FLD_##name##_HI))
> +
> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))

As per the buildbot report, GENMASK_ULL() would be appropriate when the 
other side is a u64 (although either way this does look a lot like 
reinventing FIELD_GET()...)

> +#define ATTR_CFG_GET_FLD(attr, name)				\
> +	_ATTR_CFG_GET_FLD(attr,					\
> +			  ATTR_CFG_FLD_##name##_CFG,		\
> +			  ATTR_CFG_FLD_##name##_LO,		\
> +			  ATTR_CFG_FLD_##name##_HI)
> +
> +GEN_PMU_FORMAT_ATTR(mask);
> +GEN_PMU_FORMAT_ATTR(match);
> +GEN_PMU_FORMAT_ATTR(invert);
> +GEN_PMU_FORMAT_ATTR(incr);
> +GEN_PMU_FORMAT_ATTR(event);
> +GEN_PMU_FORMAT_ATTR(clkdiv2);
> +
> +static struct attribute *dmc620_pmu_formats_attrs[] = {
> +	&format_attr_mask.attr,
> +	&format_attr_match.attr,
> +	&format_attr_invert.attr,
> +	&format_attr_incr.attr,
> +	&format_attr_event.attr,
> +	&format_attr_clkdiv2.attr,
> +	NULL,
> +};
> +
> +static struct attribute_group dmc620_pmu_format_attr_group = {
> +	.name	= "format",
> +	.attrs	= dmc620_pmu_formats_attrs,
> +};
> +
> +static const struct attribute_group *dmc620_pmu_attr_groups[] = {
> +	&dmc620_pmu_events_attr_group,
> +	&dmc620_pmu_format_attr_group,
> +	NULL,
> +};
> +
> +static inline
> +u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
> +			unsigned int idx, unsigned int reg)
> +{
> +	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
> +}
> +
> +static inline
> +void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
> +			unsigned int idx, unsigned int reg, u32 val)
> +{
> +	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
> +}
> +
> +static
> +unsigned int dmc620_event_to_counter_control(struct perf_event *event)
> +{
> +	struct perf_event_attr *attr = &event->attr;
> +	unsigned int reg = 0;
> +
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INVERT,

FIELD_PREP(), surely? (all 3 times)

> +			ATTR_CFG_GET_FLD(attr, invert));
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
> +			ATTR_CFG_GET_FLD(attr, incr));
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
> +			ATTR_CFG_GET_FLD(attr, event));

As a cheeky micro-optimisation, consider laying out the relevant fields 
in the config word in the same order as the corresponding register 
fields, such that the compiler can ultimately optimise this whole lot 
together into a single mask/shift operation.

> +	return reg;
> +}
> +
> +static int dmc620_get_event_idx(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	int idx, start_idx, end_idx;
> +
> +	if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
> +		start_idx = 0;
> +		end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
> +	} else {
> +		start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
> +		end_idx = DMC620_PMU_MAX_COUNTERS;
> +	}
> +
> +	for (idx = start_idx; idx < end_idx; ++idx) {
> +		if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
> +			return idx;
> +	}
> +
> +	/* The counters are all in use. */
> +	return -EAGAIN;
> +}
> +
> +static u64 dmc620_pmu_read_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	return dmc620_pmu_creg_read(dmc620_pmu,
> +				    event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
> +}

Might it be simpler to inline this in its sole caller? I'm not sure it 
adds any clarity to have *two* levels of trivial indirection around a 
simple register read.

> +static void dmc620_pmu_event_update(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	u64 delta, prev_count, new_count;
> +
> +	do {
> +		/* We may also be called from the irq handler */
> +		prev_count = local64_read(&hwc->prev_count);
> +		new_count = dmc620_pmu_read_counter(event);
> +	} while (local64_cmpxchg(&hwc->prev_count,
> +			prev_count, new_count) != prev_count);
> +	delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
> +	local64_add(delta, &event->count);
> +}
> +
> +static void dmc620_pmu_event_set_period(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
> +}
> +
> +static void dmc620_pmu_enable_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct perf_event_attr *attr = &event->attr;
> +	u64 reg;
> +
> +	reg = ATTR_CFG_GET_FLD(attr, mask);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
> +
> +	reg = ATTR_CFG_GET_FLD(attr, match);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));

Do the mask and match values have to be rewritten every time, or could 
those be set once in event_add() such that you can just toggle the 
control register to start and stop the underlying counter?

> +	reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, (u32)reg);
> +}
> +
> +static void dmc620_pmu_disable_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
> +}
> +
> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
> +{
> +	struct dmc620_pmu_irq *irq = data;
> +	struct dmc620_pmu *dmc620_pmu;
> +	irqreturn_t ret = IRQ_NONE;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
> +		unsigned long status;
> +		struct perf_event *event;
> +		unsigned int idx;
> +
> +		/*
> +		 * HW doesn't provide a control to atomically disable all counters.
> +		 * To prevent race condition, disable all events before continuing
> +		 */

I'm still doubtful that this doesn't introduce more inaccuracy overall 
than whatever it's trying to avoid... :/

> +		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
> +			event = dmc620_pmu->events[idx];
> +			if (!event)
> +				continue;
> +			dmc620_pmu_disable_counter(event);
> +		}
> +
> +		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
> +		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
> +				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
> +		if (!status)
> +			continue;

Oh hey, remember that time you unconditionally disabled all the live 
counters on the way in? ;)

> +		for_each_set_bit(idx, &status,
> +				  DMC620_PMU_MAX_COUNTERS) {
> +			event = dmc620_pmu->events[idx];
> +			if (WARN_ON_ONCE(!event))
> +				continue;
> +			dmc620_pmu_event_update(event);
> +			dmc620_pmu_event_set_period(event);
> +		}
> +
> +		if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
> +			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
> +
> +		if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
> +			DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
> +			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
> +
> +		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
> +			event = dmc620_pmu->events[idx];
> +			if (!event)
> +				continue;
> +			if (!(event->hw.state & PERF_HES_STOPPED))
> +				dmc620_pmu_enable_counter(event);
> +		}
> +
> +		ret = IRQ_HANDLED;
> +	}
> +	rcu_read_unlock();
> +
> +	return ret;
> +}
> +
> +static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
> +{
> +	struct dmc620_pmu_irq *irq;
> +	int ret;
> +
> +	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
> +		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
> +			return irq;
> +
> +	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
> +	if (!irq)
> +		return ERR_PTR(-ENOMEM);
> +
> +	INIT_LIST_HEAD(&irq->pmus_node);
> +
> +	/* Pick one CPU to be the preferred one to use */
> +	irq->cpu = raw_smp_processor_id();
> +	refcount_set(&irq->refcount, 1);
> +
> +	ret = request_irq(irq_num, dmc620_pmu_handle_irq,
> +			  IRQF_NOBALANCING | IRQF_NO_THREAD,
> +			  "dmc620-pmu", irq);
> +	if (ret)
> +		goto out_free_aff;
> +
> +	ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
> +	if (ret)
> +		goto out_free_irq;
> +
> +	ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
> +	if (ret)
> +		goto out_free_irq;
> +
> +	irq->irq_num = irq_num;
> +	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
> +
> +	return irq;
> +
> +out_free_irq:
> +	free_irq(irq_num, irq);
> +out_free_aff:
> +	kfree(irq);
> +	return ERR_PTR(ret);
> +}
> +
> +static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
> +{
> +	struct dmc620_pmu_irq *irq;
> +
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	irq = __dmc620_pmu_get_irq(irq_num);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	if (IS_ERR(irq))
> +		return PTR_ERR(irq);
> +
> +	dmc620_pmu->irq = irq;
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	return 0;
> +}
> +
> +static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
> +{
> +	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
> +
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_del_rcu(&dmc620_pmu->pmus_node);
> +
> +	if (!refcount_dec_and_test(&irq->refcount)) {
> +		mutex_unlock(&dmc620_pmu_irqs_lock);
> +		return;
> +	}
> +
> +	list_del(&irq->irqs_node);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	free_irq(irq->irq_num, irq);
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
> +	kfree(irq);
> +}
> +
> +static int dmc620_pmu_event_init(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct perf_event *sibling;
> +
> +	if (event->attr.type != event->pmu->type)
> +		return -ENOENT;
> +
> +	/*
> +	 * DMC 620 PMUs are shared across all cpus and cannot
> +	 * support task bound and sampling events.
> +	 */
> +	if (is_sampling_event(event) ||
> +		event->attach_state & PERF_ATTACH_TASK) {
> +		dev_dbg(dmc620_pmu->pmu.dev,
> +			"Can't support per-task counters\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	/*
> +	 * Many perf core operations (eg. events rotation) operate on a
> +	 * single CPU context. This is obvious for CPU PMUs, where one
> +	 * expects the same sets of events being observed on all CPUs,
> +	 * but can lead to issues for off-core PMUs, where each
> +	 * event could be theoretically assigned to a different CPU. To
> +	 * mitigate this, we enforce CPU assignment to one, selected
> +	 * processor.
> +	 */
> +	event->cpu = dmc620_pmu->irq->cpu;
> +	if (event->cpu < 0)
> +		return -EINVAL;
> +
> +	/*
> +	 * We must NOT create groups containing mixed PMUs, although software
> +	 * events are acceptable.
> +	 */
> +	if (event->group_leader->pmu != event->pmu &&
> +			!is_software_event(event->group_leader))
> +		return -EINVAL;
> +
> +	for_each_sibling_event(sibling, event->group_leader) {
> +		if (sibling->pmu != event->pmu &&
> +				!is_software_event(sibling))
> +			return -EINVAL;
> +	}

As before, if you can't start, stop, or read multiple counters 
atomically, you can't really support groups of events for this PMU 
either. It's impossible to measure accurate ratios with a variable 
amount of skew between individual counters.

> +	hwc->idx = -1;
> +	return 0;
> +}
> +
> +static void dmc620_pmu_read(struct perf_event *event)
> +{
> +	dmc620_pmu_event_update(event);
> +}
> +
> +static void dmc620_pmu_start(struct perf_event *event, int flags)
> +{
> +	event->hw.state = 0;
> +	dmc620_pmu_event_set_period(event);
> +	dmc620_pmu_enable_counter(event);
> +}
> +
> +static void dmc620_pmu_stop(struct perf_event *event, int flags)
> +{
> +	if (event->hw.state & PERF_HES_STOPPED)
> +		return;
> +
> +	dmc620_pmu_disable_counter(event);
> +	dmc620_pmu_event_update(event);
> +	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +}
> +
> +static int dmc620_pmu_add(struct perf_event *event, int flags)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx;
> +
> +	idx = dmc620_get_event_idx(event);
> +	if (idx < 0)
> +		return idx;
> +
> +	hwc->idx = idx;
> +	dmc620_pmu->events[idx] = event;
> +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +
> +	if (flags & PERF_EF_START)
> +		dmc620_pmu_start(event, PERF_EF_RELOAD);
> +
> +	perf_event_update_userpage(event);
> +	return 0;
> +}
> +
> +static void dmc620_pmu_del(struct perf_event *event, int flags)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +
> +	dmc620_pmu_stop(event, PERF_EF_UPDATE);
> +	dmc620_pmu->events[idx] = NULL;
> +	clear_bit(idx, dmc620_pmu->used_mask);
> +	perf_event_update_userpage(event);
> +}
> +
> +static int dmc620_pmu_cpu_teardown(unsigned int cpu,
> +				   struct hlist_node *node)
> +{
> +	struct dmc620_pmu_irq *irq;
> +	struct dmc620_pmu *dmc620_pmu;
> +	unsigned int target;
> +
> +	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
> +	if (cpu != irq->cpu)
> +		return 0;
> +
> +	target = cpumask_any_but(cpu_online_mask, cpu);
> +	if (target >= nr_cpu_ids)
> +		return 0;
> +
> +	/* We're only reading, but this isn't the place to be involving RCU */
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
> +		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
> +	irq->cpu = target;
> +
> +	return 0;
> +}
> +
> +static int dmc620_pmu_device_probe(struct platform_device *pdev)
> +{
> +	struct dmc620_pmu *dmc620_pmu;
> +	struct resource *res;
> +	char *name;
> +	int irq_num;
> +	int i, ret;
> +
> +	dmc620_pmu = devm_kzalloc(&pdev->dev,
> +			sizeof(struct dmc620_pmu), GFP_KERNEL);
> +	if (!dmc620_pmu)
> +		return -ENOMEM;
> +
> +	dmc620_pmu->pdev = pdev;

Is this ever used?

> +	platform_set_drvdata(pdev, dmc620_pmu);
> +
> +	dmc620_pmu->pmu = (struct pmu) {
> +		.module = THIS_MODULE,
> +		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
> +		.task_ctx_nr	= perf_invalid_context,
> +		.event_init	= dmc620_pmu_event_init,
> +		.add		= dmc620_pmu_add,
> +		.del		= dmc620_pmu_del,
> +		.start		= dmc620_pmu_start,
> +		.stop		= dmc620_pmu_stop,
> +		.read		= dmc620_pmu_read,
> +		.attr_groups	= dmc620_pmu_attr_groups,
> +	};
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(dmc620_pmu->base))
> +		return PTR_ERR(dmc620_pmu->base);
> +
> +	/* Make sure device is reset before enabling interrupt */
> +	for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
> +		dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);

Maybe make sure the overflow registers are clear too?

> +	irq_num = platform_get_irq(pdev, 0);
> +	if (irq_num < 0)
> +		return irq_num;
> +
> +	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
> +	if (ret)
> +		return ret;
> +
> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
> +				  "%s_%llx", DMC620_PMUNAME,
> +				  (res->start) >> DMC620_PA_SHIFT);

res->start doesn't need parentheses, however I guess it might need 
casting to u64 to solve the build warning (I'm not sure there's any 
nicer way, unfortunately).

Robin.

> +	if (!name) {
> +		dev_err(&pdev->dev,
> +			  "Create name failed, PMU @%pa\n", &res->start);
> +		goto out_teardown_dev;
> +	}
> +
> +	ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
> +	if (ret)
> +		goto out_teardown_dev;
> +
> +	return 0;
> +
> +out_teardown_dev:
> +	dmc620_pmu_put_irq(dmc620_pmu);
> +	synchronize_rcu();
> +	return ret;
> +}
> +
> +static int dmc620_pmu_device_remove(struct platform_device *pdev)
> +{
> +	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
> +
> +	dmc620_pmu_put_irq(dmc620_pmu);
> +
> +	/* perf will synchronise RCU before devres can free dmc620_pmu */
> +	perf_pmu_unregister(&dmc620_pmu->pmu);
> +
> +	return 0;
> +}
> +
> +static const struct acpi_device_id dmc620_acpi_match[] = {
> +	{ "ARMHD620", 0},
> +	{},
> +};
> +MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
> +static struct platform_driver dmc620_pmu_driver = {
> +	.driver	= {
> +		.name		= DMC620_DRVNAME,
> +		.acpi_match_table = dmc620_acpi_match,
> +	},
> +	.probe	= dmc620_pmu_device_probe,
> +	.remove	= dmc620_pmu_device_remove,
> +};
> +
> +static int __init dmc620_pmu_init(void)
> +{
> +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> +				      DMC620_DRVNAME,
> +				      NULL,
> +				      dmc620_pmu_cpu_teardown);
> +	if (cpuhp_state_num < 0)
> +		return cpuhp_state_num;
> +
> +	return platform_driver_register(&dmc620_pmu_driver);
> +}
> +
> +static void __exit dmc620_pmu_exit(void)
> +{
> +	platform_driver_unregister(&dmc620_pmu_driver);
> +	cpuhp_remove_multi_state(cpuhp_state_num);
> +}
> +
> +module_init(dmc620_pmu_init);
> +module_exit(dmc620_pmu_exit);
> +
> +MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
> +MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com");
> +MODULE_LICENSE("GPL v2");
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-22 17:34   ` Robin Murphy
  0 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-22 17:34 UTC (permalink / raw)
  To: Tuan Phan
  Cc: mark.rutland, patches, Will Deacon, linux-kernel, linux-arm-kernel

On 2020-10-21 00:10, Tuan Phan wrote:
> DMC-620 PMU supports total 10 counters which each is
> independently programmable to different events and can
> be started and stopped individually.
> 
> Currently, it only supports ACPI. Other platforms feel free to test and add
> support for device tree.
> 
> Usage example:
>    #perf stat -e arm_dmc620_10008c000/clk_cycle_count/ -C 0
>    Get perf event for clk_cycle_count counter.
> 
>    #perf stat -e arm_dmc620_10008c000/clkdiv2_allocate,mask=0x1f,match=0x2f,
>    incr=2,invert=1/ -C 0
>    The above example shows how to specify mask, match, incr,
>    invert parameters for clkdiv2_allocate event.
> 
> Signed-off-by: Tuan Phan <tuanphan@os.amperecomputing.com>
> ---
> Changes in v4:
> - Addressed Robin's comments.
> 
> Changes in v3:
> - Removed "_OFFSET" suffix.
> - Renamed "affinity" to "irq".
> - Have a better definition of group register.
> 
> Changes in v2:
> - Removed IRQF_SHARED flag and added support for multiple
> PMUs sharing the same interrupt.
> - Fixed an interrupt handler race condition.
> 
> The ACPI binding spec for PMU DMC620 can be downloaded at:
> https://developer.arm.com/documentation/den0093/c/
> 
>   drivers/perf/Kconfig          |   7 +
>   drivers/perf/Makefile         |   1 +
>   drivers/perf/arm_dmc620_pmu.c | 746 ++++++++++++++++++++++++++++++++++++++++++
>   3 files changed, 754 insertions(+)
>   create mode 100644 drivers/perf/arm_dmc620_pmu.c
> 
> diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> index 130327ff..3075cf1 100644
> --- a/drivers/perf/Kconfig
> +++ b/drivers/perf/Kconfig
> @@ -130,6 +130,13 @@ config ARM_SPE_PMU
>   	  Extension, which provides periodic sampling of operations in
>   	  the CPU pipeline and reports this via the perf AUX interface.
>   
> +config ARM_DMC620_PMU
> +	tristate "Enable PMU support for the ARM DMC-620 memory controller"
> +	depends on (ARM64 && ACPI) || COMPILE_TEST
> +	help
> +	  Support for PMU events monitoring on the ARM DMC-620 memory
> +	  controller.
> +
>   source "drivers/perf/hisilicon/Kconfig"
>   
>   endmenu
> diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
> index 5365fd5..5260b11 100644
> --- a/drivers/perf/Makefile
> +++ b/drivers/perf/Makefile
> @@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
>   obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
>   obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
>   obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
> +obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
> diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
> new file mode 100644
> index 0000000..d72cd54
> --- /dev/null
> +++ b/drivers/perf/arm_dmc620_pmu.c
> @@ -0,0 +1,746 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * ARM DMC-620 memory controller PMU driver
> + *
> + * Copyright (C) 2020 Ampere Computing LLC.
> + */
> +
> +#define DMC620_PMUNAME		"arm_dmc620"
> +#define DMC620_DRVNAME		DMC620_PMUNAME "_pmu"
> +#define pr_fmt(fmt)		DMC620_DRVNAME ": " fmt
> +
> +#include <linux/acpi.h>
> +#include <linux/bitfield.h>
> +#include <linux/bitops.h>
> +#include <linux/cpuhotplug.h>
> +#include <linux/cpumask.h>
> +#include <linux/device.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/kernel.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/mutex.h>
> +#include <linux/perf_event.h>
> +#include <linux/perf/arm_pmu.h>
> +#include <linux/platform_device.h>
> +#include <linux/printk.h>
> +#include <linux/rculist.h>
> +#include <linux/refcount.h>
> +
> +#define DMC620_PA_SHIFT					12
> +#define DMC620_CNT_INIT					0x80000000
> +#define DMC620_CNT_MAX_PERIOD				0xffffffff
> +#define DMC620_PMU_CLKDIV2_MAX_COUNTERS			8
> +#define DMC620_PMU_CLK_MAX_COUNTERS			2
> +#define DMC620_PMU_MAX_COUNTERS				\
> +	(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
> +
> +/*
> + * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
> + * offsets are relative to that base.
> + *
> + * Each counter has a group of control/value registers, and the
> + * DMC620_PMU_COUNTERn offsets are within a counter group.
> + *
> + * The counter registers groups start at 0xA10.
> + */
> +#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2		0x8
> +#define  DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
> +		(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
> +#define DMC620_PMU_OVERFLOW_STATUS_CLK			0xC
> +#define  DMC620_PMU_OVERFLOW_STATUS_CLK_MASK		\
> +		(DMC620_PMU_CLK_MAX_COUNTERS - 1)
> +#define DMC620_PMU_COUNTERS_BASE			0x10
> +#define DMC620_PMU_COUNTERn_MASK_31_00			0x0
> +#define DMC620_PMU_COUNTERn_MASK_63_32			0x4
> +#define DMC620_PMU_COUNTERn_MATCH_31_00			0x8
> +#define DMC620_PMU_COUNTERn_MATCH_63_32			0xC
> +#define DMC620_PMU_COUNTERn_CONTROL			0x10
> +#define  DMC620_PMU_COUNTERn_CONTROL_ENABLE		BIT(0)
> +#define  DMC620_PMU_COUNTERn_CONTROL_INVERT		BIT(1)
> +#define  DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX		GENMASK(6, 2)
> +#define  DMC620_PMU_COUNTERn_CONTROL_INCR_MUX		GENMASK(8, 7)
> +#define DMC620_PMU_COUNTERn_VALUE			0x20
> +/* Offset of the registers for a given counter, relative to 0xA00 */
> +#define DMC620_PMU_COUNTERn_OFFSET(n) \
> +	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
> +
> +static LIST_HEAD(dmc620_pmu_irqs);
> +static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
> +
> +struct dmc620_pmu_irq {
> +	struct hlist_node node;
> +	struct list_head pmus_node;
> +	struct list_head irqs_node;
> +	refcount_t refcount;
> +	unsigned int irq_num;
> +	unsigned int cpu;
> +};
> +
> +struct dmc620_pmu {
> +	struct pmu pmu;
> +	struct platform_device *pdev;
> +
> +	void __iomem *base;
> +	struct dmc620_pmu_irq *irq;
> +	struct list_head pmus_node;
> +
> +	/*
> +	 * We put all clkdiv2 and clk counters to a same array.
> +	 * The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
> +	 * clkdiv2 counters, the last DMC620_PMU_CLK_MAX_COUNTERS
> +	 * belong to clk counters.
> +	 */
> +	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
> +	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
> +};
> +
> +#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
> +
> +static int cpuhp_state_num;
> +
> +struct dmc620_pmu_event_attr {
> +	struct device_attribute attr;
> +	u8 clkdiv2;
> +	u8 eventid;
> +};
> +
> +static ssize_t
> +dmc620_pmu_event_show(struct device *dev,
> +			   struct device_attribute *attr, char *page)
> +{
> +	struct dmc620_pmu_event_attr *eattr;
> +
> +	eattr = container_of(attr, typeof(*eattr), attr);
> +
> +	return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
> +}
> +
> +#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)		\
> +	(&((struct dmc620_pmu_event_attr[]) {{				\
> +		.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL),	\
> +		.clkdiv2 = _clkdiv2,						\
> +		.eventid = _eventid,					\
> +	}})[0].attr.attr)
> +
> +static struct attribute *dmc620_pmu_events_attrs[] = {
> +	/* clkdiv2 events list */
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
> +	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
> +
> +	/* clk events list */
> +	DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
> +	DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
> +	DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
> +	NULL,
> +};
> +
> +static struct attribute_group dmc620_pmu_events_attr_group = {
> +	.name = "events",
> +	.attrs = dmc620_pmu_events_attrs,
> +};
> +
> +/* User ABI */
> +#define ATTR_CFG_FLD_mask_CFG		config
> +#define ATTR_CFG_FLD_mask_LO		0
> +#define ATTR_CFG_FLD_mask_HI		44
> +#define ATTR_CFG_FLD_match_CFG		config1
> +#define ATTR_CFG_FLD_match_LO		0
> +#define ATTR_CFG_FLD_match_HI		44
> +#define ATTR_CFG_FLD_invert_CFG		config2
> +#define ATTR_CFG_FLD_invert_LO		0
> +#define ATTR_CFG_FLD_invert_HI		0
> +#define ATTR_CFG_FLD_incr_CFG		config2
> +#define ATTR_CFG_FLD_incr_LO		1
> +#define ATTR_CFG_FLD_incr_HI		2
> +#define ATTR_CFG_FLD_event_CFG		config2
> +#define ATTR_CFG_FLD_event_LO		3
> +#define ATTR_CFG_FLD_event_HI		8
> +#define ATTR_CFG_FLD_clkdiv2_CFG	config2
> +#define ATTR_CFG_FLD_clkdiv2_LO		9
> +#define ATTR_CFG_FLD_clkdiv2_HI		9
> +
> +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
> +	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
> +
> +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
> +	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
> +
> +#define GEN_PMU_FORMAT_ATTR(name)				\
> +	PMU_FORMAT_ATTR(name,					\
> +	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,		\
> +			     ATTR_CFG_FLD_##name##_LO,		\
> +			     ATTR_CFG_FLD_##name##_HI))
> +
> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))

As per the buildbot report, GENMASK_ULL() would be appropriate when the 
other side is a u64 (although either way this does look a lot like 
reinventing FIELD_GET()...)

> +#define ATTR_CFG_GET_FLD(attr, name)				\
> +	_ATTR_CFG_GET_FLD(attr,					\
> +			  ATTR_CFG_FLD_##name##_CFG,		\
> +			  ATTR_CFG_FLD_##name##_LO,		\
> +			  ATTR_CFG_FLD_##name##_HI)
> +
> +GEN_PMU_FORMAT_ATTR(mask);
> +GEN_PMU_FORMAT_ATTR(match);
> +GEN_PMU_FORMAT_ATTR(invert);
> +GEN_PMU_FORMAT_ATTR(incr);
> +GEN_PMU_FORMAT_ATTR(event);
> +GEN_PMU_FORMAT_ATTR(clkdiv2);
> +
> +static struct attribute *dmc620_pmu_formats_attrs[] = {
> +	&format_attr_mask.attr,
> +	&format_attr_match.attr,
> +	&format_attr_invert.attr,
> +	&format_attr_incr.attr,
> +	&format_attr_event.attr,
> +	&format_attr_clkdiv2.attr,
> +	NULL,
> +};
> +
> +static struct attribute_group dmc620_pmu_format_attr_group = {
> +	.name	= "format",
> +	.attrs	= dmc620_pmu_formats_attrs,
> +};
> +
> +static const struct attribute_group *dmc620_pmu_attr_groups[] = {
> +	&dmc620_pmu_events_attr_group,
> +	&dmc620_pmu_format_attr_group,
> +	NULL,
> +};
> +
> +static inline
> +u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
> +			unsigned int idx, unsigned int reg)
> +{
> +	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
> +}
> +
> +static inline
> +void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
> +			unsigned int idx, unsigned int reg, u32 val)
> +{
> +	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
> +}
> +
> +static
> +unsigned int dmc620_event_to_counter_control(struct perf_event *event)
> +{
> +	struct perf_event_attr *attr = &event->attr;
> +	unsigned int reg = 0;
> +
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INVERT,

FIELD_PREP(), surely? (all 3 times)

> +			ATTR_CFG_GET_FLD(attr, invert));
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
> +			ATTR_CFG_GET_FLD(attr, incr));
> +	reg |= FIELD_GET(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
> +			ATTR_CFG_GET_FLD(attr, event));

As a cheeky micro-optimisation, consider laying out the relevant fields 
in the config word in the same order as the corresponding register 
fields, such that the compiler can ultimately optimise this whole lot 
together into a single mask/shift operation.

> +	return reg;
> +}
> +
> +static int dmc620_get_event_idx(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	int idx, start_idx, end_idx;
> +
> +	if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
> +		start_idx = 0;
> +		end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
> +	} else {
> +		start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
> +		end_idx = DMC620_PMU_MAX_COUNTERS;
> +	}
> +
> +	for (idx = start_idx; idx < end_idx; ++idx) {
> +		if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
> +			return idx;
> +	}
> +
> +	/* The counters are all in use. */
> +	return -EAGAIN;
> +}
> +
> +static u64 dmc620_pmu_read_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	return dmc620_pmu_creg_read(dmc620_pmu,
> +				    event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
> +}

Might it be simpler to inline this in its sole caller? I'm not sure it 
adds any clarity to have *two* levels of trivial indirection around a 
simple register read.

> +static void dmc620_pmu_event_update(struct perf_event *event)
> +{
> +	struct hw_perf_event *hwc = &event->hw;
> +	u64 delta, prev_count, new_count;
> +
> +	do {
> +		/* We may also be called from the irq handler */
> +		prev_count = local64_read(&hwc->prev_count);
> +		new_count = dmc620_pmu_read_counter(event);
> +	} while (local64_cmpxchg(&hwc->prev_count,
> +			prev_count, new_count) != prev_count);
> +	delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
> +	local64_add(delta, &event->count);
> +}
> +
> +static void dmc620_pmu_event_set_period(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
> +}
> +
> +static void dmc620_pmu_enable_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct perf_event_attr *attr = &event->attr;
> +	u64 reg;
> +
> +	reg = ATTR_CFG_GET_FLD(attr, mask);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
> +
> +	reg = ATTR_CFG_GET_FLD(attr, match);
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));

Do the mask and match values have to be rewritten every time, or could 
those be set once in event_add() such that you can just toggle the 
control register to start and stop the underlying counter?

> +	reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, (u32)reg);
> +}
> +
> +static void dmc620_pmu_disable_counter(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +
> +	dmc620_pmu_creg_write(dmc620_pmu,
> +			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
> +}
> +
> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
> +{
> +	struct dmc620_pmu_irq *irq = data;
> +	struct dmc620_pmu *dmc620_pmu;
> +	irqreturn_t ret = IRQ_NONE;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
> +		unsigned long status;
> +		struct perf_event *event;
> +		unsigned int idx;
> +
> +		/*
> +		 * HW doesn't provide a control to atomically disable all counters.
> +		 * To prevent race condition, disable all events before continuing
> +		 */

I'm still doubtful that this doesn't introduce more inaccuracy overall 
than whatever it's trying to avoid... :/

> +		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
> +			event = dmc620_pmu->events[idx];
> +			if (!event)
> +				continue;
> +			dmc620_pmu_disable_counter(event);
> +		}
> +
> +		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
> +		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
> +				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
> +		if (!status)
> +			continue;

Oh hey, remember that time you unconditionally disabled all the live 
counters on the way in? ;)

> +		for_each_set_bit(idx, &status,
> +				  DMC620_PMU_MAX_COUNTERS) {
> +			event = dmc620_pmu->events[idx];
> +			if (WARN_ON_ONCE(!event))
> +				continue;
> +			dmc620_pmu_event_update(event);
> +			dmc620_pmu_event_set_period(event);
> +		}
> +
> +		if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
> +			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
> +
> +		if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
> +			DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
> +			writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
> +
> +		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
> +			event = dmc620_pmu->events[idx];
> +			if (!event)
> +				continue;
> +			if (!(event->hw.state & PERF_HES_STOPPED))
> +				dmc620_pmu_enable_counter(event);
> +		}
> +
> +		ret = IRQ_HANDLED;
> +	}
> +	rcu_read_unlock();
> +
> +	return ret;
> +}
> +
> +static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
> +{
> +	struct dmc620_pmu_irq *irq;
> +	int ret;
> +
> +	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
> +		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
> +			return irq;
> +
> +	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
> +	if (!irq)
> +		return ERR_PTR(-ENOMEM);
> +
> +	INIT_LIST_HEAD(&irq->pmus_node);
> +
> +	/* Pick one CPU to be the preferred one to use */
> +	irq->cpu = raw_smp_processor_id();
> +	refcount_set(&irq->refcount, 1);
> +
> +	ret = request_irq(irq_num, dmc620_pmu_handle_irq,
> +			  IRQF_NOBALANCING | IRQF_NO_THREAD,
> +			  "dmc620-pmu", irq);
> +	if (ret)
> +		goto out_free_aff;
> +
> +	ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
> +	if (ret)
> +		goto out_free_irq;
> +
> +	ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
> +	if (ret)
> +		goto out_free_irq;
> +
> +	irq->irq_num = irq_num;
> +	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
> +
> +	return irq;
> +
> +out_free_irq:
> +	free_irq(irq_num, irq);
> +out_free_aff:
> +	kfree(irq);
> +	return ERR_PTR(ret);
> +}
> +
> +static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
> +{
> +	struct dmc620_pmu_irq *irq;
> +
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	irq = __dmc620_pmu_get_irq(irq_num);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	if (IS_ERR(irq))
> +		return PTR_ERR(irq);
> +
> +	dmc620_pmu->irq = irq;
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	return 0;
> +}
> +
> +static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
> +{
> +	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
> +
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_del_rcu(&dmc620_pmu->pmus_node);
> +
> +	if (!refcount_dec_and_test(&irq->refcount)) {
> +		mutex_unlock(&dmc620_pmu_irqs_lock);
> +		return;
> +	}
> +
> +	list_del(&irq->irqs_node);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	free_irq(irq->irq_num, irq);
> +	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
> +	kfree(irq);
> +}
> +
> +static int dmc620_pmu_event_init(struct perf_event *event)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	struct perf_event *sibling;
> +
> +	if (event->attr.type != event->pmu->type)
> +		return -ENOENT;
> +
> +	/*
> +	 * DMC 620 PMUs are shared across all cpus and cannot
> +	 * support task bound and sampling events.
> +	 */
> +	if (is_sampling_event(event) ||
> +		event->attach_state & PERF_ATTACH_TASK) {
> +		dev_dbg(dmc620_pmu->pmu.dev,
> +			"Can't support per-task counters\n");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	/*
> +	 * Many perf core operations (eg. events rotation) operate on a
> +	 * single CPU context. This is obvious for CPU PMUs, where one
> +	 * expects the same sets of events being observed on all CPUs,
> +	 * but can lead to issues for off-core PMUs, where each
> +	 * event could be theoretically assigned to a different CPU. To
> +	 * mitigate this, we enforce CPU assignment to one, selected
> +	 * processor.
> +	 */
> +	event->cpu = dmc620_pmu->irq->cpu;
> +	if (event->cpu < 0)
> +		return -EINVAL;
> +
> +	/*
> +	 * We must NOT create groups containing mixed PMUs, although software
> +	 * events are acceptable.
> +	 */
> +	if (event->group_leader->pmu != event->pmu &&
> +			!is_software_event(event->group_leader))
> +		return -EINVAL;
> +
> +	for_each_sibling_event(sibling, event->group_leader) {
> +		if (sibling->pmu != event->pmu &&
> +				!is_software_event(sibling))
> +			return -EINVAL;
> +	}

As before, if you can't start, stop, or read multiple counters 
atomically, you can't really support groups of events for this PMU 
either. It's impossible to measure accurate ratios with a variable 
amount of skew between individual counters.

> +	hwc->idx = -1;
> +	return 0;
> +}
> +
> +static void dmc620_pmu_read(struct perf_event *event)
> +{
> +	dmc620_pmu_event_update(event);
> +}
> +
> +static void dmc620_pmu_start(struct perf_event *event, int flags)
> +{
> +	event->hw.state = 0;
> +	dmc620_pmu_event_set_period(event);
> +	dmc620_pmu_enable_counter(event);
> +}
> +
> +static void dmc620_pmu_stop(struct perf_event *event, int flags)
> +{
> +	if (event->hw.state & PERF_HES_STOPPED)
> +		return;
> +
> +	dmc620_pmu_disable_counter(event);
> +	dmc620_pmu_event_update(event);
> +	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +}
> +
> +static int dmc620_pmu_add(struct perf_event *event, int flags)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx;
> +
> +	idx = dmc620_get_event_idx(event);
> +	if (idx < 0)
> +		return idx;
> +
> +	hwc->idx = idx;
> +	dmc620_pmu->events[idx] = event;
> +	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
> +
> +	if (flags & PERF_EF_START)
> +		dmc620_pmu_start(event, PERF_EF_RELOAD);
> +
> +	perf_event_update_userpage(event);
> +	return 0;
> +}
> +
> +static void dmc620_pmu_del(struct perf_event *event, int flags)
> +{
> +	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	int idx = hwc->idx;
> +
> +	dmc620_pmu_stop(event, PERF_EF_UPDATE);
> +	dmc620_pmu->events[idx] = NULL;
> +	clear_bit(idx, dmc620_pmu->used_mask);
> +	perf_event_update_userpage(event);
> +}
> +
> +static int dmc620_pmu_cpu_teardown(unsigned int cpu,
> +				   struct hlist_node *node)
> +{
> +	struct dmc620_pmu_irq *irq;
> +	struct dmc620_pmu *dmc620_pmu;
> +	unsigned int target;
> +
> +	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
> +	if (cpu != irq->cpu)
> +		return 0;
> +
> +	target = cpumask_any_but(cpu_online_mask, cpu);
> +	if (target >= nr_cpu_ids)
> +		return 0;
> +
> +	/* We're only reading, but this isn't the place to be involving RCU */
> +	mutex_lock(&dmc620_pmu_irqs_lock);
> +	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
> +		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
> +	mutex_unlock(&dmc620_pmu_irqs_lock);
> +
> +	WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
> +	irq->cpu = target;
> +
> +	return 0;
> +}
> +
> +static int dmc620_pmu_device_probe(struct platform_device *pdev)
> +{
> +	struct dmc620_pmu *dmc620_pmu;
> +	struct resource *res;
> +	char *name;
> +	int irq_num;
> +	int i, ret;
> +
> +	dmc620_pmu = devm_kzalloc(&pdev->dev,
> +			sizeof(struct dmc620_pmu), GFP_KERNEL);
> +	if (!dmc620_pmu)
> +		return -ENOMEM;
> +
> +	dmc620_pmu->pdev = pdev;

Is this ever used?

> +	platform_set_drvdata(pdev, dmc620_pmu);
> +
> +	dmc620_pmu->pmu = (struct pmu) {
> +		.module = THIS_MODULE,
> +		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
> +		.task_ctx_nr	= perf_invalid_context,
> +		.event_init	= dmc620_pmu_event_init,
> +		.add		= dmc620_pmu_add,
> +		.del		= dmc620_pmu_del,
> +		.start		= dmc620_pmu_start,
> +		.stop		= dmc620_pmu_stop,
> +		.read		= dmc620_pmu_read,
> +		.attr_groups	= dmc620_pmu_attr_groups,
> +	};
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(dmc620_pmu->base))
> +		return PTR_ERR(dmc620_pmu->base);
> +
> +	/* Make sure device is reset before enabling interrupt */
> +	for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
> +		dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);

Maybe make sure the overflow registers are clear too?

> +	irq_num = platform_get_irq(pdev, 0);
> +	if (irq_num < 0)
> +		return irq_num;
> +
> +	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
> +	if (ret)
> +		return ret;
> +
> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
> +				  "%s_%llx", DMC620_PMUNAME,
> +				  (res->start) >> DMC620_PA_SHIFT);

res->start doesn't need parentheses, however I guess it might need 
casting to u64 to solve the build warning (I'm not sure there's any 
nicer way, unfortunately).

Robin.

> +	if (!name) {
> +		dev_err(&pdev->dev,
> +			  "Create name failed, PMU @%pa\n", &res->start);
> +		goto out_teardown_dev;
> +	}
> +
> +	ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
> +	if (ret)
> +		goto out_teardown_dev;
> +
> +	return 0;
> +
> +out_teardown_dev:
> +	dmc620_pmu_put_irq(dmc620_pmu);
> +	synchronize_rcu();
> +	return ret;
> +}
> +
> +static int dmc620_pmu_device_remove(struct platform_device *pdev)
> +{
> +	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
> +
> +	dmc620_pmu_put_irq(dmc620_pmu);
> +
> +	/* perf will synchronise RCU before devres can free dmc620_pmu */
> +	perf_pmu_unregister(&dmc620_pmu->pmu);
> +
> +	return 0;
> +}
> +
> +static const struct acpi_device_id dmc620_acpi_match[] = {
> +	{ "ARMHD620", 0},
> +	{},
> +};
> +MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
> +static struct platform_driver dmc620_pmu_driver = {
> +	.driver	= {
> +		.name		= DMC620_DRVNAME,
> +		.acpi_match_table = dmc620_acpi_match,
> +	},
> +	.probe	= dmc620_pmu_device_probe,
> +	.remove	= dmc620_pmu_device_remove,
> +};
> +
> +static int __init dmc620_pmu_init(void)
> +{
> +	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
> +				      DMC620_DRVNAME,
> +				      NULL,
> +				      dmc620_pmu_cpu_teardown);
> +	if (cpuhp_state_num < 0)
> +		return cpuhp_state_num;
> +
> +	return platform_driver_register(&dmc620_pmu_driver);
> +}
> +
> +static void __exit dmc620_pmu_exit(void)
> +{
> +	platform_driver_unregister(&dmc620_pmu_driver);
> +	cpuhp_remove_multi_state(cpuhp_state_num);
> +}
> +
> +module_init(dmc620_pmu_init);
> +module_exit(dmc620_pmu_exit);
> +
> +MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
> +MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com");
> +MODULE_LICENSE("GPL v2");
> 

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
       [not found]   ` <1EC85DEF-8E0C-42B9-9B01-DA897147B1F7@amperemail.onmicrosoft.com>
@ 2020-10-23 13:43       ` Robin Murphy
  0 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-23 13:43 UTC (permalink / raw)
  To: Tuan Phan
  Cc: Tuan Phan, patches, Mark Rutland, Will Deacon, linux-kernel,
	linux-arm-kernel

On 2020-10-22 22:46, Tuan Phan wrote:
[...]
>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>
>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
> 
> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.

Huh? The left-hand side of the "&" expression will always be a u64 here, 
which is unsigned long long. Regardless of whether an unsigned long on 
the right-hand side happens to be the same size, you have a semantic 
type mismatch, which is trivial to put right. I can't comprehend why 
introducing a fake build dependency to hide this would seem like a 
better idea than making a tiny change to make the code 100% correct and 
robust with zero practical impact :/

Sure, you only copied this from the SPE driver; that doesn't mean it was 
ever correct, simply that the mismatch was hidden since that driver *is* 
tightly coupled to one particular CPU ISA.

[...]
>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>> +{
>>> +	struct dmc620_pmu_irq *irq = data;
>>> +	struct dmc620_pmu *dmc620_pmu;
>>> +	irqreturn_t ret = IRQ_NONE;
>>> +
>>> +	rcu_read_lock();
>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>> +		unsigned long status;
>>> +		struct perf_event *event;
>>> +		unsigned int idx;
>>> +
>>> +		/*
>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>> +		 * To prevent race condition, disable all events before continuing
>>> +		 */
>>
>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
> 
> I think it does. By disabling all counters, you make sure the overflow status cannot change at the same time you are
> clearing it (by writing zero) after reading all counters.

Urgh, *now* I get what the race is - we don't have a proper 
write-1-to-clear interrupt status register, so however much care we take 
in writing back to the overflow register there's always *some* risk of 
wiping out a new event when writing back, unless we ensure that no new 
overflows can occur *before* reading the status. What a horrible piece 
of hardware design... :(

Perhaps it's worth expanding the comment a little more, since apparently 
it's not super-obvious.

[...]
>>> +	/*
>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>> +	 * events are acceptable.
>>> +	 */
>>> +	if (event->group_leader->pmu != event->pmu &&
>>> +			!is_software_event(event->group_leader))
>>> +		return -EINVAL;
>>> +
>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>> +		if (sibling->pmu != event->pmu &&
>>> +				!is_software_event(sibling))
>>> +			return -EINVAL;
>>> +	}
>>
>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
> 
> Can you elaborate more? The only issue I know of is that we can’t stop all counters of the same PMU atomically in the IRQ handler to prevent a race condition.  But that can be fixed by manually disabling each counter. Other than that, all the counters work independently.

The point of groups is to be able to count two or more events for the 
exact same time period, in order to measure ratios between them 
accurately. ->add, ->del, ->read, etc. are still called one at a time 
for each event in the group, but those calls are made as part of a 
transaction, which for most drivers is achieved by perf core calling 
->pmu_disable and ->pmu_enable around the other calls. Since this driver 
doesn't have enable/disable functionality, the individual events will 
count for different lengths of time depending on what order those calls 
are made in (which is not necessarily constant), and how long each one 
takes. Thus you'll end up with an indeterminate amount of error between 
what each count represents, and the group is not really any more 
accurate than if the events were simply scheduled independently, which 
is not how it's supposed to work.

Come to think of it, you're also not validating that groups are even 
schedulable - try something like:

   perf stat -e 
'{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' 
sleep 5

and observe perf core being very confused and unhappy when ->add starts 
failing for a group that ->event_init said was OK, since 3 events won't 
actually fit into the 2 available counters.

As I said before, I think you probably would be able to make groups work 
with some careful hooking up of snapshot functionality to ->start_txn 
and ->commit_txn, but to start with it'll be an awful lot simpler to 
just be honest and reject them.

[...]
>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>> +				  "%s_%llx", DMC620_PMUNAME,
>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>
>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
> 
> I will remove those parentheses; we don’t need the u64 cast, since the build warning only appears when compile-testing on 32-bit.

As above, deliberately hacking the build for the sake of not fixing 
clearly dodgy code is crazy. The only correct format specifier for an 
expression of type phys_addr_t/resource_size_t is "%pa"; if you want to 
use a different format then explicitly converting the argument to a type 
appropriate for that format (either via a simple cast or an intermediate 
variable) is indisputably correct, regardless of whether you might 
happen to get away with an implicit conversion sometimes.

The whole point of maximising COMPILE_TEST coverage is to improve code 
quality in order to avoid this exact situation, wherein someone copies a 
pattern from an existing driver only to discover that it's not actually 
as robust as it should be.

Robin.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-23 13:43       ` Robin Murphy
  0 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-23 13:43 UTC (permalink / raw)
  To: Tuan Phan
  Cc: Mark Rutland, Tuan Phan, linux-kernel, patches, Will Deacon,
	linux-arm-kernel

On 2020-10-22 22:46, Tuan Phan wrote:
[...]
>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>
>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
> 
> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.

Huh? The left-hand side of the "&" expression will always be a u64 here, 
which is unsigned long long. Regardless of whether an unsigned long on 
the right-hand side happens to be the same size, you have a semantic 
type mismatch, which is trivial to put right. I can't comprehend why 
introducing a fake build dependency to hide this would seem like a 
better idea than making a tiny change to make the code 100% correct and 
robust with zero practical impact :/

Sure, you only copied this from the SPE driver; that doesn't mean it was 
ever correct, simply that the mismatch was hidden since that driver *is* 
tightly coupled to one particular CPU ISA.

[...]
>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>> +{
>>> +	struct dmc620_pmu_irq *irq = data;
>>> +	struct dmc620_pmu *dmc620_pmu;
>>> +	irqreturn_t ret = IRQ_NONE;
>>> +
>>> +	rcu_read_lock();
>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>> +		unsigned long status;
>>> +		struct perf_event *event;
>>> +		unsigned int idx;
>>> +
>>> +		/*
>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>> +		 * To prevent race condition, disable all events before continuing
>>> +		 */
>>
>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
> 
> I think it does. By disabling all counters, you make sure the overflow status does not change at the same time you are clearing
> it (by writing zero) after reading all counters.

Urgh, *now* I get what the race is - we don't have a proper 
write-1-to-clear interrupt status register, so however much care we take 
in writing back to the overflow register there's always *some* risk of 
wiping out a new event when writing back, unless we ensure that no new 
overflows can occur *before* reading the status. What a horrible piece 
of hardware design... :(

Perhaps it's worth expanding the comment a little more, since apparently 
it's not super-obvious.

[...]
>>> +	/*
>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>> +	 * events are acceptable.
>>> +	 */
>>> +	if (event->group_leader->pmu != event->pmu &&
>>> +			!is_software_event(event->group_leader))
>>> +		return -EINVAL;
>>> +
>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>> +		if (sibling->pmu != event->pmu &&
>>> +				!is_software_event(sibling))
>>> +			return -EINVAL;
>>> +	}
>>
>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
> 
> Can you elaborate more? The only issue I know is we can’t stop all counters of same PMU atomically in IRQ handler to prevent race condition.  But it can be fixed by manually disable each counter. Other than that, every counters are working independently.

The point of groups is to be able to count two or more events for the 
exact same time period, in order to measure ratios between them 
accurately. ->add, ->del, ->read, etc. are still called one at a time 
for each event in the group, but those calls are made as part of a 
transaction, which for most drivers is achieved by perf core calling 
->pmu_disable and ->pmu_enable around the other calls. Since this driver 
doesn't have enable/disable functionality, the individual events will 
count for different lengths of time depending on what order those calls 
are made in (which is not necessarily constant), and how long each one 
takes. Thus you'll end up with an indeterminate amount of error between 
what each count represents, and the group is not really any more 
accurate than if the events were simply scheduled independently, which 
is not how it's supposed to work.

Come to think of it, you're also not validating that groups are even 
schedulable - try something like:

   perf stat -e 
'{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' 
sleep 5

and observe perf core being very confused and unhappy when ->add starts 
failing for a group that ->event_init said was OK, since 3 events won't 
actually fit into the 2 available counters.

As I said before, I think you probably would be able to make groups work 
with some careful hooking up of snapshot functionality to ->start_txn 
and ->commit_txn, but to start with it'll be an awful lot simpler to 
just be honest and reject them.

[...]
>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>> +				  "%s_%llx", DMC620_PMUNAME,
>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>
>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
> 
> I will remove those parentheses, we don’t need u64 as build warning only applies when it runs compiling test with 32bit.

As above, deliberately hacking the build for the sake of not fixing 
clearly dodgy code is crazy. The only correct format specifier for an 
expression of type phys_addr_t/resource_size_t is "%pa"; if you want to 
use a different format then explicitly converting the argument to a type 
appropriate for that format (either via a simple cast or an intermediate 
variable) is indisputably correct, regardless of whether you might 
happen to get away with an implicit conversion sometimes.

The whole point of maximising COMPILE_TEST coverage is to improve code 
quality in order to avoid this exact situation, wherein someone copies a 
pattern from an existing driver only to discover that it's not actually 
as robust as it should be.

Robin.

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
  2020-10-23 13:43       ` Robin Murphy
@ 2020-10-23 18:23         ` Tuan Phan
  -1 siblings, 0 replies; 12+ messages in thread
From: Tuan Phan @ 2020-10-23 18:23 UTC (permalink / raw)
  To: Robin Murphy
  Cc: Tuan Phan, patches, Mark Rutland, Will Deacon, linux-kernel,
	linux-arm-kernel



> On Oct 23, 2020, at 6:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
> 
> On 2020-10-22 22:46, Tuan Phan wrote:
> [...]
>>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>> 
>>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
>> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.
> 
> Huh? The left-hand side of the "&" expression will always be a u64 here, which is unsigned long long. Regardless of whether an unsigned long on the right-hand side happens to be the same size, you have a semantic type mismatch, which is trivial to put right. I can't comprehend why introducing a fake build dependency to hide this would seem like a better idea than making a tiny change to make the code 100% correct and robust with zero practical impact :/
> 
> Sure, you only copied this from the SPE driver; that doesn't mean it was ever correct, simply that the mismatch was hidden since that driver *is* tightly coupled to one particular CPU ISA.

Got it. Actually after seeing your CMN driver which has (COMPILE_TEST && 64BIT), I thought the driver should be only tested under 64BIT platform. Any reason why CMN need 64BIT with COMPILE_TEST?

> 
> [...]
>>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>>> +{
>>>> +	struct dmc620_pmu_irq *irq = data;
>>>> +	struct dmc620_pmu *dmc620_pmu;
>>>> +	irqreturn_t ret = IRQ_NONE;
>>>> +
>>>> +	rcu_read_lock();
>>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>>> +		unsigned long status;
>>>> +		struct perf_event *event;
>>>> +		unsigned int idx;
>>>> +
>>>> +		/*
>>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>>> +		 * To prevent race condition, disable all events before continuing
>>>> +		 */
>>> 
>>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
>> I think it does. By disabling all counters, you make sure the overflow status does not change at the same time you are clearing
>> it (by writing zero) after reading all counters.
> 
> Urgh, *now* I get what the race is - we don't have a proper write-1-to-clear interrupt status register, so however much care we take in writing back to the overflow register there's always *some* risk of wiping out a new event when writing back, unless we ensure that no new overflows can occur *before* reading the status. What a horrible piece of hardware design... :(
> 
> Perhaps it's worth expanding the comment a little more, since apparently it's not super-obvious.

I will expand the comment more to explain the race condition.
> 
> [...]
>>>> +	/*
>>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>>> +	 * events are acceptable.
>>>> +	 */
>>>> +	if (event->group_leader->pmu != event->pmu &&
>>>> +			!is_software_event(event->group_leader))
>>>> +		return -EINVAL;
>>>> +
>>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>>> +		if (sibling->pmu != event->pmu &&
>>>> +				!is_software_event(sibling))
>>>> +			return -EINVAL;
>>>> +	}
>>> 
>>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
>> Can you elaborate more? The only issue I know is we can’t stop all counters of same PMU atomically in IRQ handler to prevent race condition.  But it can be fixed by manually disable each counter. Other than that, every counters are working independently.
> 
> The point of groups is to be able to count two or more events for the exact same time period, in order to measure ratios between them accurately. ->add, ->del, ->read, etc. are still called one at a time for each event in the group, but those calls are made as part of a transaction, which for most drivers is achieved by perf core calling ->pmu_disable and ->pmu_enable around the other calls. Since this driver doesn't have enable/disable functionality, the individual events will count for different lengths of time depending on what order those calls are made in (which is not necessarily constant), and how long each one takes. Thus you'll end up with an indeterminate amount of error between what each count represents, and the group is not really any more accurate than if the events were simply scheduled independently, which is not how it's supposed to work.
> 
> Come to think of it, you're also not validating that groups are even schedulable - try something like:
> 
>  perf stat -e '{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' sleep 5
> 
> and observe perf core being very confused and unhappy when ->add starts failing for a group that ->event_init said was OK, since 3 events won't actually fit into the 2 available counters.
> 
> As I said before, I think you probably would be able to make groups work with some careful hooking up of snapshot functionality to ->start_txn and ->commit_txn, but to start with it'll be an awful lot simpler to just be honest and reject them.

Got it. Thanks for educating me. I will allow only one HW event then.
> 
> [...]
>>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>>> +				  "%s_%llx", DMC620_PMUNAME,
>>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>> 
>>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
>> I will remove those parentheses, we don’t need u64 as build warning only applies when it runs compiling test with 32bit.
> 
> As above, deliberately hacking the build for the sake of not fixing clearly dodgy code is crazy. The only correct format specifier for an expression of type phys_addr_t/resource_size_t is "%pa"; if you want to use a different format then explicitly converting the argument to a type appropriate for that format (either via a simple cast or an intermediate variable) is indisputably correct, regardless of whether you might happen to get away with an implicit conversion sometimes.
> 
> The whole point of maximising COMPILE_TEST coverage is to improve code quality in order to avoid this exact situation, wherein someone copies a pattern from an existing driver only to discover that it's not actually as robust as it should be.

Got it. Will fix it

Thanks,
Tuan.

> 
> Robin.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-23 18:23         ` Tuan Phan
  0 siblings, 0 replies; 12+ messages in thread
From: Tuan Phan @ 2020-10-23 18:23 UTC (permalink / raw)
  To: Robin Murphy
  Cc: Mark Rutland, Tuan Phan, linux-kernel, patches, Will Deacon,
	linux-arm-kernel



> On Oct 23, 2020, at 6:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
> 
> On 2020-10-22 22:46, Tuan Phan wrote:
> [...]
>>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>> 
>>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
>> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.
> 
> Huh? The left-hand side of the "&" expression will always be a u64 here, which is unsigned long long. Regardless of whether an unsigned long on the right-hand side happens to be the same size, you have a semantic type mismatch, which is trivial to put right. I can't comprehend why introducing a fake build dependency to hide this would seem like a better idea than making a tiny change to make the code 100% correct and robust with zero practical impact :/
> 
> Sure, you only copied this from the SPE driver; that doesn't mean it was ever correct, simply that the mismatch was hidden since that driver *is* tightly coupled to one particular CPU ISA.

Got it. Actually after seeing your CMN driver which has (COMPILE_TEST && 64BIT), I thought the driver should be only tested under 64BIT platform. Any reason why CMN need 64BIT with COMPILE_TEST?

> 
> [...]
>>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>>> +{
>>>> +	struct dmc620_pmu_irq *irq = data;
>>>> +	struct dmc620_pmu *dmc620_pmu;
>>>> +	irqreturn_t ret = IRQ_NONE;
>>>> +
>>>> +	rcu_read_lock();
>>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>>> +		unsigned long status;
>>>> +		struct perf_event *event;
>>>> +		unsigned int idx;
>>>> +
>>>> +		/*
>>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>>> +		 * To prevent race condition, disable all events before continuing
>>>> +		 */
>>> 
>>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
>> I think it does. By disabling all counters, you make sure the overflow status does not change at the same time you are clearing
>> it (by writing zero) after reading all counters.
> 
> Urgh, *now* I get what the race is - we don't have a proper write-1-to-clear interrupt status register, so however much care we take in writing back to the overflow register there's always *some* risk of wiping out a new event when writing back, unless we ensure that no new overflows can occur *before* reading the status. What a horrible piece of hardware design... :(
> 
> Perhaps it's worth expanding the comment a little more, since apparently it's not super-obvious.

I will expand the comment more to explain the race condition.
> 
> [...]
>>>> +	/*
>>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>>> +	 * events are acceptable.
>>>> +	 */
>>>> +	if (event->group_leader->pmu != event->pmu &&
>>>> +			!is_software_event(event->group_leader))
>>>> +		return -EINVAL;
>>>> +
>>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>>> +		if (sibling->pmu != event->pmu &&
>>>> +				!is_software_event(sibling))
>>>> +			return -EINVAL;
>>>> +	}
>>> 
>>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
>> Can you elaborate more? The only issue I know is we can’t stop all counters of same PMU atomically in IRQ handler to prevent race condition.  But it can be fixed by manually disable each counter. Other than that, every counters are working independently.
> 
> The point of groups is to be able to count two or more events for the exact same time period, in order to measure ratios between them accurately. ->add, ->del, ->read, etc. are still called one at a time for each event in the group, but those calls are made as part of a transaction, which for most drivers is achieved by perf core calling ->pmu_disable and ->pmu_enable around the other calls. Since this driver doesn't have enable/disable functionality, the individual events will count for different lengths of time depending on what order those calls are made in (which is not necessarily constant), and how long each one takes. Thus you'll end up with an indeterminate amount of error between what each count represents, and the group is not really any more accurate than if the events were simply scheduled independently, which is not how it's supposed to work.
> 
> Come to think of it, you're also not validating that groups are even schedulable - try something like:
> 
>  perf stat -e '{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' sleep 5
> 
> and observe perf core being very confused and unhappy when ->add starts failing for a group that ->event_init said was OK, since 3 events won't actually fit into the 2 available counters.
> 
> As I said before, I think you probably would be able to make groups work with some careful hooking up of snapshot functionality to ->start_txn and ->commit_txn, but to start with it'll be an awful lot simpler to just be honest and reject them.

Got it. Thanks for educating me. I will allow only one HW event then.
> 
> [...]
>>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>>> +				  "%s_%llx", DMC620_PMUNAME,
>>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>> 
>>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
>> I will remove those parentheses, we don’t need u64 as build warning only applies when it runs compiling test with 32bit.
> 
> As above, deliberately hacking the build for the sake of not fixing clearly dodgy code is crazy. The only correct format specifier for an expression of type phys_addr_t/resource_size_t is "%pa"; if you want to use a different format then explicitly converting the argument to a type appropriate for that format (either via a simple cast or an intermediate variable) is indisputably correct, regardless of whether you might happen to get away with an implicit conversion sometimes.
> 
> The whole point of maximising COMPILE_TEST coverage is to improve code quality in order to avoid this exact situation, wherein someone copies a pattern from an existing driver only to discover that it's not actually as robust as it should be.

Got it. Will fix it

Thanks,
Tuan.

> 
> Robin.


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
  2020-10-23 18:23         ` Tuan Phan
@ 2020-10-27 11:54           ` Robin Murphy
  -1 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-27 11:54 UTC (permalink / raw)
  To: Tuan Phan
  Cc: Tuan Phan, patches, Mark Rutland, Will Deacon, linux-kernel,
	linux-arm-kernel

On 2020-10-23 19:23, Tuan Phan wrote:
> 
> 
>> On Oct 23, 2020, at 6:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
>>
>> On 2020-10-22 22:46, Tuan Phan wrote:
>> [...]
>>>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>>>
>>>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
>>> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.
>>
>> Huh? The left-hand side of the "&" expression will always be a u64 here, which is unsigned long long. Regardless of whether an unsigned long on the right-hand side happens to be the same size, you have a semantic type mismatch, which is trivial to put right. I can't comprehend why introducing a fake build dependency to hide this would seem like a better idea than making a tiny change to make the code 100% correct and robust with zero practical impact :/
>>
>> Sure, you only copied this from the SPE driver; that doesn't mean it was ever correct, simply that the mismatch was hidden since that driver *is* tightly coupled to one particular CPU ISA.
> 
> Got it. Actually after seeing your CMN driver which has (COMPILE_TEST && 64BIT), I thought the driver should be only tested under 64BIT platform. Any reason why CMN need 64BIT with COMPILE_TEST?

The CMN driver relies heavily on 64-bit I/O accessors (readq() etc.) 
which by default aren't defined on 32-bit architectures. Thus even 
though its type usage should be clean now (it definitely wasn't back 
when that was first added!) it would still require *functional* changes 
to even pretend to have 32-bit support.

Robin.

>> [...]
>>>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>>>> +{
>>>>> +	struct dmc620_pmu_irq *irq = data;
>>>>> +	struct dmc620_pmu *dmc620_pmu;
>>>>> +	irqreturn_t ret = IRQ_NONE;
>>>>> +
>>>>> +	rcu_read_lock();
>>>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>>>> +		unsigned long status;
>>>>> +		struct perf_event *event;
>>>>> +		unsigned int idx;
>>>>> +
>>>>> +		/*
>>>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>>>> +		 * To prevent race condition, disable all events before continuing
>>>>> +		 */
>>>>
>>>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
>>> I think it does. By disabling all counters, you make sure the overflow status does not change at the same time you are clearing
>>> it (by writing zero) after reading all counters.
>>
>> Urgh, *now* I get what the race is - we don't have a proper write-1-to-clear interrupt status register, so however much care we take in writing back to the overflow register there's always *some* risk of wiping out a new event when writing back, unless we ensure that no new overflows can occur *before* reading the status. What a horrible piece of hardware design... :(
>>
>> Perhaps it's worth expanding the comment a little more, since apparently it's not super-obvious.
> 
> I will expand the comment more to explain the race condition.
>>
>> [...]
>>>>> +	/*
>>>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>>>> +	 * events are acceptable.
>>>>> +	 */
>>>>> +	if (event->group_leader->pmu != event->pmu &&
>>>>> +			!is_software_event(event->group_leader))
>>>>> +		return -EINVAL;
>>>>> +
>>>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>>>> +		if (sibling->pmu != event->pmu &&
>>>>> +				!is_software_event(sibling))
>>>>> +			return -EINVAL;
>>>>> +	}
>>>>
>>>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
>>> Can you elaborate more? The only issue I know is we can’t stop all counters of same PMU atomically in IRQ handler to prevent race condition.  But it can be fixed by manually disable each counter. Other than that, every counters are working independently.
>>
>> The point of groups is to be able to count two or more events for the exact same time period, in order to measure ratios between them accurately. ->add, ->del, ->read, etc. are still called one at a time for each event in the group, but those calls are made as part of a transaction, which for most drivers is achieved by perf core calling ->pmu_disable and ->pmu_enable around the other calls. Since this driver doesn't have enable/disable functionality, the individual events will count for different lengths of time depending on what order those calls are made in (which is not necessarily constant), and how long each one takes. Thus you'll end up with an indeterminate amount of error between what each count represents, and the group is not really any more accurate than if the events were simply scheduled independently, which is not how it's supposed to work.
>>
>> Come to think of it, you're also not validating that groups are even schedulable - try something like:
>>
>>   perf stat -e '{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' sleep 5
>>
>> and observe perf core being very confused and unhappy when ->add starts failing for a group that ->event_init said was OK, since 3 events won't actually fit into the 2 available counters.
>>
>> As I said before, I think you probably would be able to make groups work with some careful hooking up of snapshot functionality to ->start_txn and ->commit_txn, but to start with it'll be an awful lot simpler to just be honest and reject them.
> 
> Got it. Thanks for educating me. I will allow only one HW event then.
>>
>> [...]
>>>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>>>> +				  "%s_%llx", DMC620_PMUNAME,
>>>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>>>
>>>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
>>> I will remove those parentheses, we don’t need u64 as build warning only applies when it runs compiling test with 32bit.
>>
>> As above, deliberately hacking the build for the sake of not fixing clearly dodgy code is crazy. The only correct format specifier for an expression of type phys_addr_t/resource_size_t is "%pa"; if you want to use a different format then explicitly converting the argument to a type appropriate for that format (either via a simple cast or an intermediate variable) is indisputably correct, regardless of whether you might happen to get away with an implicit conversion sometimes.
>>
>> The whole point of maximising COMPILE_TEST coverage is to improve code quality in order to avoid this exact situation, wherein someone copies a pattern from an existing driver only to discover that it's not actually as robust as it should be.
> 
> Got it. Will fix it
> 
> Thanks,
> Tuan.
> 
>>
>> Robin.
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller
@ 2020-10-27 11:54           ` Robin Murphy
  0 siblings, 0 replies; 12+ messages in thread
From: Robin Murphy @ 2020-10-27 11:54 UTC (permalink / raw)
  To: Tuan Phan
  Cc: Mark Rutland, Tuan Phan, linux-kernel, patches, Will Deacon,
	linux-arm-kernel

On 2020-10-23 19:23, Tuan Phan wrote:
> 
> 
>> On Oct 23, 2020, at 6:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
>>
>> On 2020-10-22 22:46, Tuan Phan wrote:
>> [...]
>>>>> +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
>>>>> +	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
>>>>
>>>> As per the buildbot report, GENMASK_ULL() would be appropriate when the other side is a u64 (although either way this does look a lot like reinventing FIELD_GET()...)
>>> I will add (COMPILE_TEST && 64BIT) to Kconfig so it should fix the buildbot report.
>>
>> Huh? The left-hand side of the "&" expression will always be a u64 here, which is unsigned long long. Regardless of whether an unsigned long on the right-hand side happens to be the same size, you have a semantic type mismatch, which is trivial to put right. I can't comprehend why introducing a fake build dependency to hide this would seem like a better idea than making a tiny change to make the code 100% correct and robust with zero practical impact :/
>>
>> Sure, you only copied this from the SPE driver; that doesn't mean it was ever correct, simply that the mismatch was hidden since that driver *is* tightly coupled to one particular CPU ISA.
> 
> Got it. Actually after seeing your CMN driver which has (COMPILE_TEST && 64BIT), I thought the driver should be only tested under 64BIT platform. Any reason why CMN need 64BIT with COMPILE_TEST?

The CMN driver relies heavily on 64-bit I/O accessors (readq() etc.) 
which by default aren't defined on 32-bit architectures. Thus even 
though its type usage should be clean now (it definitely wasn't back 
when that was first added!) it would still require *functional* changes 
to even pretend to have 32-bit support.

Robin.

>> [...]
>>>>> +static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
>>>>> +{
>>>>> +	struct dmc620_pmu_irq *irq = data;
>>>>> +	struct dmc620_pmu *dmc620_pmu;
>>>>> +	irqreturn_t ret = IRQ_NONE;
>>>>> +
>>>>> +	rcu_read_lock();
>>>>> +	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
>>>>> +		unsigned long status;
>>>>> +		struct perf_event *event;
>>>>> +		unsigned int idx;
>>>>> +
>>>>> +		/*
>>>>> +		 * HW doesn't provide a control to atomically disable all counters.
>>>>> +		 * To prevent race condition, disable all events before continuing
>>>>> +		 */
>>>>
>>>> I'm still doubtful that this doesn't introduce more inaccuracy overall than whatever it's trying to avoid... :/
>>> I think it does. By disabling all counters, you make sure the overflow status does not change at the same time you are clearing
>>> it (by writing zero) after reading all counters.
>>
>> Urgh, *now* I get what the race is - we don't have a proper write-1-to-clear interrupt status register, so however much care we take in writing back to the overflow register there's always *some* risk of wiping out a new event when writing back, unless we ensure that no new overflows can occur *before* reading the status. What a horrible piece of hardware design... :(
>>
>> Perhaps it's worth expanding the comment a little more, since apparently it's not super-obvious.
> 
> I will expand the comment more to explain the race condition.
>>
>> [...]
>>>>> +	/*
>>>>> +	 * We must NOT create groups containing mixed PMUs, although software
>>>>> +	 * events are acceptable.
>>>>> +	 */
>>>>> +	if (event->group_leader->pmu != event->pmu &&
>>>>> +			!is_software_event(event->group_leader))
>>>>> +		return -EINVAL;
>>>>> +
>>>>> +	for_each_sibling_event(sibling, event->group_leader) {
>>>>> +		if (sibling->pmu != event->pmu &&
>>>>> +				!is_software_event(sibling))
>>>>> +			return -EINVAL;
>>>>> +	}
>>>>
>>>> As before, if you can't start, stop, or read multiple counters atomically, you can't really support groups of events for this PMU either. It's impossible to measure accurate ratios with a variable amount of skew between individual counters.
>>> Can you elaborate more? The only issue I know of is that we can’t stop all counters of the same PMU atomically in the IRQ handler to prevent the race condition.  But it can be fixed by manually disabling each counter. Other than that, every counter works independently.
>>
>> The point of groups is to be able to count two or more events for the exact same time period, in order to measure ratios between them accurately. ->add, ->del, ->read, etc. are still called one at a time for each event in the group, but those calls are made as part of a transaction, which for most drivers is achieved by perf core calling ->pmu_disable and ->pmu_enable around the other calls. Since this driver doesn't have enable/disable functionality, the individual events will count for different lengths of time depending on what order those calls are made in (which is not necessarily constant), and how long each one takes. Thus you'll end up with an indeterminate amount of error between what each count represents, and the group is not really any more accurate than if the events were simply scheduled independently, which is not how it's supposed to work.
>>
>> Come to think of it, you're also not validating that groups are even schedulable - try something like:
>>
>>   perf stat -e '{arm_dmc620_10008c000/clk_cycle_count/,arm_dmc620_10008c000/clk_request/,arm_dmc620_10008c000/clk_upload_stall/}' sleep 5
>>
>> and observe perf core being very confused and unhappy when ->add starts failing for a group that ->event_init said was OK, since 3 events won't actually fit into the 2 available counters.
>>
>> As I said before, I think you probably would be able to make groups work with some careful hooking up of snapshot functionality to ->start_txn and ->commit_txn, but to start with it'll be an awful lot simpler to just be honest and reject them.
> 
> Got it. Thanks for educating me. I will allow only one HW event then.
>>
>> [...]
>>>>> +	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
>>>>> +				  "%s_%llx", DMC620_PMUNAME,
>>>>> +				  (res->start) >> DMC620_PA_SHIFT);
>>>>
>>>> res->start doesn't need parentheses, however I guess it might need casting to u64 to solve the build warning (I'm not sure there's any nicer way, unfortunately).
>>> I will remove those parentheses; we don’t need u64, as the build warning only applies when running the compile test on 32-bit.
>>
>> As above, deliberately hacking the build for the sake of not fixing clearly dodgy code is crazy. The only correct format specifier for an expression of type phys_addr_t/resource_size_t is "%pa"; if you want to use a different format then explicitly converting the argument to a type appropriate for that format (either via a simple cast or an intermediate variable) is indisputably correct, regardless of whether you might happen to get away with an implicit conversion sometimes.
>>
>> The whole point of maximising COMPILE_TEST coverage is to improve code quality in order to avoid this exact situation, wherein someone copies a pattern from an existing driver only to discover that it's not actually as robust as it should be.
> 
> Got it. Will fix it
> 
> Thanks,
> Tuan.
> 
>>
>> Robin.
> 

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2020-10-27 11:55 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-20 23:10 [PATCH v4] driver/perf: Add PMU driver for the ARM DMC-620 memory controller Tuan Phan
2020-10-20 23:10 ` Tuan Phan
2020-10-21 10:38 ` kernel test robot
2020-10-21 10:38   ` kernel test robot
2020-10-22 17:34 ` Robin Murphy
2020-10-22 17:34   ` Robin Murphy
     [not found]   ` <1EC85DEF-8E0C-42B9-9B01-DA897147B1F7@amperemail.onmicrosoft.com>
2020-10-23 13:43     ` Robin Murphy
2020-10-23 13:43       ` Robin Murphy
2020-10-23 18:23       ` Tuan Phan
2020-10-23 18:23         ` Tuan Phan
2020-10-27 11:54         ` Robin Murphy
2020-10-27 11:54           ` Robin Murphy

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.