From: "tip-bot2 for Kan Liang" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Kan Liang <kan.liang@linux.intel.com>,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [tip: perf/core] perf/x86: Track pmu in per-CPU cpu_hw_events
Date: Tue, 20 Apr 2021 10:46:49 -0000
Message-ID: <161891560922.29796.12178605451839953247.tip-bot2@tip-bot2>
In-Reply-To: <1618237865-33448-4-git-send-email-kan.liang@linux.intel.com>

The following commit has been merged into the perf/core branch of tip:

Commit-ID:     61e76d53c39bb768ad264d379837cfc56b9e35b4
Gitweb:        https://git.kernel.org/tip/61e76d53c39bb768ad264d379837cfc56b9e35b4
Author:        Kan Liang <kan.liang@linux.intel.com>
AuthorDate:    Mon, 12 Apr 2021 07:30:43 -07:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 19 Apr 2021 20:03:24 +02:00

perf/x86: Track pmu in per-CPU cpu_hw_events

Some platforms, e.g. Alder Lake, have a hybrid architecture: the same
package may contain more than one type of CPU, and the PMU capabilities
differ between the CPU types. Perf therefore registers a dedicated PMU
for each CPU type.

Add a 'pmu' pointer to struct cpu_hw_events to track the dedicated PMU
of the current CPU.

The current x86_get_pmu() uses the global 'pmu', which is broken on a
hybrid platform. Modify it to return the 'pmu' of the given CPU.

Initialize the per-CPU 'pmu' variable with the global 'pmu'. Nothing
changes for non-hybrid platforms.

is_x86_event() will be updated for hybrid platforms in a later patch
("perf/x86: Register hybrid PMUs"). For non-hybrid platforms, nothing
changes here.
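
To make the new lookup pattern concrete, the following is a minimal
userspace C sketch of the idea, not kernel code: the names NR_CPUS (as
a plain array bound), cpu_ctx and get_pmu_for() are invented for the
illustration, where the kernel instead uses DEFINE_PER_CPU(),
per_cpu() and WARN_ON_ONCE().

    #include <stdio.h>

    #define NR_CPUS 8

    struct pmu { const char *name; };

    /* Global fallback PMU, analogous to the static 'pmu' in core.c. */
    static struct pmu global_pmu = { .name = "global" };

    /* Per-CPU context, analogous to struct cpu_hw_events gaining a 'pmu'. */
    struct cpu_ctx { struct pmu *pmu; };
    static struct cpu_ctx cpu_ctx[NR_CPUS];

    /* Analogue of x86_get_pmu(): the CPU's PMU, or the global fallback. */
    static struct pmu *get_pmu_for(unsigned int cpu)
    {
            struct cpu_ctx *ctx = &cpu_ctx[cpu];

            if (!ctx->pmu) {
                    /* Mirrors the WARN_ON_ONCE() fallback in the patch. */
                    fprintf(stderr, "no per-CPU pmu for cpu %u\n", cpu);
                    return &global_pmu;
            }
            return ctx->pmu;
    }

    int main(void)
    {
            struct pmu core_pmu = { .name = "cpu_core" };
            unsigned int cpu;

            /* Initialize every CPU with the global PMU, as this patch does. */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    cpu_ctx[cpu].pmu = &global_pmu;

            /* On a hybrid system, a later patch points each CPU at its own PMU. */
            cpu_ctx[0].pmu = &core_pmu;

            printf("cpu 0 -> %s\n", get_pmu_for(0)->name); /* cpu_core */
            printf("cpu 1 -> %s\n", get_pmu_for(1)->name); /* global   */
            return 0;
    }

On a non-hybrid platform every per-CPU pointer keeps referring to the
single global PMU, so only the indirection is new.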

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-4-git-send-email-kan.liang@linux.intel.com
---
 arch/x86/events/core.c       | 17 +++++++++++++----
 arch/x86/events/intel/core.c |  2 +-
 arch/x86/events/intel/ds.c   |  4 ++--
 arch/x86/events/intel/lbr.c  |  9 +++++----
 arch/x86/events/perf_event.h |  4 +++-
 5 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index dd9f3c2..a49a8bd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -45,9 +45,11 @@
 #include "perf_event.h"
 
 struct x86_pmu x86_pmu __read_mostly;
+static struct pmu pmu;
 
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
+	.pmu = &pmu,
 };
 
 DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
@@ -724,16 +726,23 @@ void x86_pmu_enable_all(int added)
 	}
 }
 
-static struct pmu pmu;
-
 static inline int is_x86_event(struct perf_event *event)
 {
 	return event->pmu == &pmu;
 }
 
-struct pmu *x86_get_pmu(void)
+struct pmu *x86_get_pmu(unsigned int cpu)
 {
-	return &pmu;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	/*
+	 * All CPUs of this hybrid type have been offlined;
+	 * x86_get_pmu() should not have been invoked.
+	 */
+	if (WARN_ON_ONCE(!cpuc->pmu))
+		return &pmu;
+
+	return cpuc->pmu;
 }
 /*
  * Event scheduler state:
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7bbb5bb..f116c63 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4876,7 +4876,7 @@ static void update_tfa_sched(void *ignored)
 	 * and if so force schedule out for all event types all contexts
 	 */
 	if (test_bit(3, cpuc->active_mask))
-		perf_pmu_resched(x86_get_pmu());
+		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
 }
 
 static ssize_t show_sysctl_tfa(struct device *cdev,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7ebae18..1bfea8c 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2192,7 +2192,7 @@ void __init intel_ds_init(void)
 					PERF_SAMPLE_TIME;
 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
 				pebs_qual = "-baseline";
-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
+				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
 			} else {
 				/* Only basic record supported */
 				x86_pmu.large_pebs_flags &=
@@ -2207,7 +2207,7 @@ void __init intel_ds_init(void)
 
 			if (x86_pmu.intel_cap.pebs_output_pt_available) {
 				pr_cont("PEBS-via-PT, ");
-				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
 			}
 
 			break;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 21890da..bb4486c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -705,7 +705,7 @@ void intel_pmu_lbr_add(struct perf_event *event)
 
 void release_lbr_buffers(void)
 {
-	struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
+	struct kmem_cache *kmem_cache;
 	struct cpu_hw_events *cpuc;
 	int cpu;
 
@@ -714,6 +714,7 @@ void release_lbr_buffers(void)
 
 	for_each_possible_cpu(cpu) {
 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
 		if (kmem_cache && cpuc->lbr_xsave) {
 			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
 			cpuc->lbr_xsave = NULL;
@@ -1609,7 +1610,7 @@ void intel_pmu_lbr_init_hsw(void)
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
 	if (lbr_from_signext_quirk_needed())
 		static_branch_enable(&lbr_from_quirk_key);
@@ -1629,7 +1630,7 @@ __init void intel_pmu_lbr_init_skl(void)
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
-	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
 
 	/*
 	 * SW branch filter usage:
@@ -1726,7 +1727,7 @@ static bool is_arch_lbr_xsave_available(void)
 
 void __init intel_pmu_arch_lbr_init(void)
 {
-	struct pmu *pmu = x86_get_pmu();
+	struct pmu *pmu = x86_get_pmu(smp_processor_id());
 	union cpuid28_eax eax;
 	union cpuid28_ebx ebx;
 	union cpuid28_ecx ecx;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 54a340e..da947d3 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -326,6 +326,8 @@ struct cpu_hw_events {
 	int				n_pair; /* Large increment events */
 
 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
+
+	struct pmu			*pmu;
 };
 
 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
@@ -904,7 +906,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {		\
 	.event_str_ht	= ht,						\
 }
 
-struct pmu *x86_get_pmu(void);
+struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)

Thread overview: 56+ messages
2021-04-12 14:30 [PATCH V6 00/25] Add Alder Lake support for perf (kernel) kan.liang
2021-04-12 14:30 ` [PATCH V6 01/25] x86/cpufeatures: Enumerate Intel Hybrid Technology feature bit kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Ricardo Neri
2021-04-12 14:30 ` [PATCH V6 02/25] x86/cpu: Add helper function to get the type of the current hybrid CPU kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Ricardo Neri
2023-05-05 16:42     ` Dave Hansen
2023-05-10 18:18       ` Ricardo Neri
2023-05-10 18:33         ` Dave Hansen
2023-05-10 19:15           ` Luck, Tony
2023-05-10 23:56           ` Ricardo Neri
2021-04-12 14:30 ` [PATCH V6 03/25] perf/x86: Track pmu in per-CPU cpu_hw_events kan.liang
2021-04-20 10:46   ` tip-bot2 for Kan Liang [this message]
2021-04-12 14:30 ` [PATCH V6 04/25] perf/x86/intel: Hybrid PMU support for perf capabilities kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 05/25] perf/x86: Hybrid PMU support for intel_ctrl kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 06/25] perf/x86: Hybrid PMU support for counters kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 07/25] perf/x86: Hybrid PMU support for unconstrained kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 08/25] perf/x86: Hybrid PMU support for hardware cache event kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 09/25] perf/x86: Hybrid PMU support for event constraints kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 10/25] perf/x86: Hybrid PMU support for extra_regs kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 11/25] perf/x86/intel: Factor out intel_pmu_check_num_counters kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 12/25] perf/x86/intel: Factor out intel_pmu_check_event_constraints kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 13/25] perf/x86/intel: Factor out intel_pmu_check_extra_regs kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 14/25] perf/x86: Remove temporary pmu assignment in event_init kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 15/25] perf/x86: Factor out x86_pmu_show_pmu_cap kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 16/25] perf/x86: Register hybrid PMUs kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 17/25] perf/x86: Add structures for the attributes of Hybrid PMUs kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 18/25] perf/x86/intel: Add attr_update for " kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:30 ` [PATCH V6 19/25] perf/x86: Support filter_match callback kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 20/25] perf/x86/intel: Add Alder Lake Hybrid support kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 21/25] perf: Extend PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 22/25] perf/x86/intel/uncore: Add Alder Lake support kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 23/25] perf/x86/msr: Add Alder Lake CPU support kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 24/25] perf/x86/cstate: " kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Kan Liang
2021-04-12 14:31 ` [PATCH V6 25/25] perf/x86/rapl: Add support for Intel Alder Lake kan.liang
2021-04-20 10:46   ` [tip: perf/core] " tip-bot2 for Zhang Rui
