From: Bharat Bhushan <bbhushan2@marvell.com>
To: <will@kernel.org>, <mark.rutland@arm.com>, <robh+dt@kernel.org>,
<linux-arm-kernel@lists.infradead.org>,
<devicetree@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Cc: Bharat Bhushan <bbhushan2@marvell.com>
Subject: [PATCH v4 4/4] perf/marvell: cn10k DDR perf event core ownership
Date: Mon, 20 Sep 2021 10:38:23 +0530
Message-ID: <20210920050823.10853-5-bbhushan2@marvell.com>
In-Reply-To: <20210920050823.10853-1-bbhushan2@marvell.com>
The DDR perf event counters are not per-core, so they must be accessed
by only one core at a time. When the core that currently owns the
counters goes offline, migrate the perf context to a newly selected
online core.
Signed-off-by: Bharat Bhushan <bbhushan2@marvell.com>
---
v1->v2->v3->v4:
- No change
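
Note for reviewers: the hotplug handling follows the usual
multi-instance cpuhp pattern. A condensed sketch of the lifecycle this
patch implements (a hypothetical my_pmu stands in for cn10k_ddr_pmu;
error handling trimmed):

	/* Module init: register the dynamic state once; the offline
	 * callback then runs for every instance added below. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
				      "perf/marvell/cn10k/ddr:online",
				      NULL, my_pmu_offline_cpu);

	/* Probe: link each device instance into the state so the
	 * callback can find it via its hlist_node. */
	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
					 &pmu->node);

	/* Offline callback: if the dying CPU owns the counters, hand
	 * ownership to any other online CPU; perf_pmu_migrate_context()
	 * moves the active events over. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target < nr_cpu_ids) {
		perf_pmu_migrate_context(&pmu->pmu, cpu, target);
		pmu->cpu = target;
	}

Teardown runs in reverse: cpuhp_state_remove_instance_nocalls() at
device remove, cpuhp_remove_multi_state() at module exit.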
drivers/perf/marvell_cn10k_ddr_pmu.c | 50 ++++++++++++++++++++++++++--
include/linux/cpuhotplug.h | 1 +
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 21fccb9090c5..bef0cee3a46a 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -129,6 +129,7 @@ struct cn10k_ddr_pmu {
 	int active_events;
 	struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
 	struct hrtimer hrtimer;
+	struct hlist_node node;
 };
 
 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
@@ -610,6 +611,24 @@ static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
+static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
+						     node);
+	unsigned int target;
+
+	if (cpu != pmu->cpu)
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+	pmu->cpu = target;
+	return 0;
+}
+
 static int cn10k_ddr_perf_probe(struct platform_device *pdev)
 {
 	struct cn10k_ddr_pmu *ddr_pmu;
@@ -661,18 +680,31 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
 	hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
 
+	cpuhp_state_add_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+
 	ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
 	if (ret)
-		return ret;
+		goto error;
 
 	pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
 	return 0;
+error:
+	cpuhp_state_remove_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+	return ret;
 }
 
 static int cn10k_ddr_perf_remove(struct platform_device *pdev)
 {
 	struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
 
+	cpuhp_state_remove_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+
 	perf_pmu_unregister(&ddr_pmu->pmu);
 	return 0;
 }
@@ -695,12 +727,26 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
 
 static int __init cn10k_ddr_pmu_init(void)
 {
-	return platform_driver_register(&cn10k_ddr_pmu_driver);
+	int ret;
+
+	ret = cpuhp_setup_state_multi(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				"perf/marvell/cn10k/ddr:online", NULL,
+				cn10k_ddr_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&cn10k_ddr_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+	return ret;
 }
 
 static void __exit cn10k_ddr_pmu_exit(void)
 {
 	platform_driver_unregister(&cn10k_ddr_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
 }
 
 module_init(cn10k_ddr_pmu_init);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 832d8a74fa59..a4b521f12b58 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -227,6 +227,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
 	CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
 	CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+	CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
 	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
--
2.17.1