From: Rob Herring <robh@kernel.org>
To: Peter Zijlstra <peterz@infradead.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Will Deacon <will@kernel.org>,
	Kan Liang <kan.liang@linux.intel.com>,
	Andy Lutomirski <luto@kernel.org>
Cc: linux-kernel@vger.kernel.org, Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@redhat.com>, Namhyung Kim <namhyung@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Borislav Petkov <bp@alien8.de>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	linux-perf-users@vger.kernel.org
Subject: [RFC 2/3] perf/x86: Control RDPMC access from .enable() hook
Date: Wed, 28 Jul 2021 17:02:29 -0600	[thread overview]
Message-ID: <20210728230230.1911468-3-robh@kernel.org> (raw)
In-Reply-To: <20210728230230.1911468-1-robh@kernel.org>

Rather than controlling RDPMC access behind the scenes from switch_mm(),
move the RDPMC access control into the PMU .enable() hook. The .enable()
hook is called whenever the perf CPU or task context changes, which is
exactly when RDPMC access may need to change.

This is the first step in moving the RDPMC state tracking out of the mm
context to the perf context.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: linux-perf-users@vger.kernel.org
Signed-off-by: Rob Herring <robh@kernel.org>
---
Not sure, but I think the set_attr_rdpmc() IPI needs to hold the perf
ctx lock?
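
For context, a minimal sketch of the user-space self-monitoring path this
series gates: the mmap() of the event is what drives x86_pmu_event_mapped()
and hence the CR4.PCE update, and the read loop follows the documented
perf_event_mmap_page sequence (field names from uapi/linux/perf_event.h).
This is only an illustration, not part of the patch; error handling and the
pmc_width sign extension are omitted for brevity.

/* Illustrative sketch only: count own instructions via RDPMC. */
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel	= 1,
	};
	struct perf_event_mmap_page *pc;
	uint64_t count;
	uint32_t seq, idx;
	int fd;

	/* Per-task event on the current task, any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Mapping the event is what enables (or not) user-space RDPMC. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	/* Lockless read as documented in uapi/linux/perf_event.h. */
	do {
		seq = pc->lock;
		__asm__ volatile("" ::: "memory");
		idx = pc->index;	/* HW index + 1; 0 means RDPMC unavailable */
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += rdpmc(idx - 1);
		__asm__ volatile("" ::: "memory");
	} while (pc->lock != seq);

	printf("instructions: %llu\n", (unsigned long long)count);
	return 0;
}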


 arch/x86/events/core.c             | 75 +++++++++++++++++++-----------
 arch/x86/include/asm/mmu_context.h |  6 ---
 arch/x86/include/asm/perf_event.h  |  1 -
 arch/x86/mm/tlb.c                  | 29 +-----------
 4 files changed, 49 insertions(+), 62 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 03f87fd4c017..5c1703206ef5 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -52,8 +52,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.pmu = &pmu,
 };

-DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
-DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
+static DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
+static DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);

 /*
@@ -727,11 +727,52 @@ static void x86_pmu_disable(struct pmu *pmu)
 	static_call(x86_pmu_disable_all)();
 }

+static void perf_clear_dirty_counters(struct cpu_hw_events *cpuc)
+{
+	int i;
+
+	 /* Don't need to clear the assigned counter. */
+	for (i = 0; i < cpuc->n_events; i++)
+		__clear_bit(cpuc->assign[i], cpuc->dirty);
+
+	if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
+		return;
+
+	for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
+		/* Metrics and fake events don't have corresponding HW counters. */
+		if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
+			continue;
+		else if (i >= INTEL_PMC_IDX_FIXED)
+			wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+		else
+			wrmsrl(x86_pmu_event_addr(i), 0);
+	}
+
+	bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
+}
+
+static void x86_pmu_set_user_access(struct cpu_hw_events *cpuc)
+{
+	if (static_branch_unlikely(&rdpmc_always_available_key) ||
+	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
+	     atomic_read(&(this_cpu_read(cpu_tlbstate.loaded_mm)->context.perf_rdpmc_allowed)))) {
+		/*
+		 * Clear the existing dirty counters to
+		 * prevent the leak for an RDPMC task.
+		 */
+		perf_clear_dirty_counters(cpuc);
+		cr4_set_bits_irqsoff(X86_CR4_PCE);
+	} else
+		cr4_clear_bits_irqsoff(X86_CR4_PCE);
+}
+
 void x86_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;

+	x86_pmu_set_user_access(cpuc);
+
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

@@ -2476,29 +2517,9 @@ static int x86_pmu_event_init(struct perf_event *event)
 	return err;
 }

-void perf_clear_dirty_counters(void)
+static void x86_pmu_set_user_access_ipi(void *unused)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	int i;
-
-	 /* Don't need to clear the assigned counter. */
-	for (i = 0; i < cpuc->n_events; i++)
-		__clear_bit(cpuc->assign[i], cpuc->dirty);
-
-	if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
-		return;
-
-	for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
-		/* Metrics and fake events don't have corresponding HW counters. */
-		if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
-			continue;
-		else if (i >= INTEL_PMC_IDX_FIXED)
-			wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
-		else
-			wrmsrl(x86_pmu_event_addr(i), 0);
-	}
-
-	bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
+	x86_pmu_set_user_access(this_cpu_ptr(&cpu_hw_events));
 }

 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
@@ -2519,7 +2540,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	mmap_assert_write_locked(mm);

 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), x86_pmu_set_user_access_ipi, NULL, 1);
 }

 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2528,7 +2549,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
 		return;

 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), x86_pmu_set_user_access_ipi, NULL, 1);
 }

 static int x86_pmu_event_idx(struct perf_event *event)
@@ -2584,7 +2605,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 		else if (x86_pmu.attr_rdpmc == 2)
 			static_branch_dec(&rdpmc_always_available_key);

-		on_each_cpu(cr4_update_pce, NULL, 1);
+		on_each_cpu(x86_pmu_set_user_access_ipi, NULL, 1);
 		x86_pmu.attr_rdpmc = val;
 	}

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 27516046117a..1cbb32ac245e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -22,12 +22,6 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 }
 #endif	/* !CONFIG_PARAVIRT_XXL */

-#ifdef CONFIG_PERF_EVENTS
-DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
-DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-void cr4_update_pce(void *ignored);
-#endif
-
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 /*
  * ldt_structs can be allocated, used, and freed, but they are never
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc1b5003713..544f41a179fb 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -478,7 +478,6 @@ struct x86_pmu_lbr {

 extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
 extern void perf_check_microcode(void);
-extern void perf_clear_dirty_counters(void);
 extern int x86_perf_rdpmc_index(struct perf_event *event);
 #else
 static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index cfe6b1e85fa6..060a3de78380 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -400,31 +400,6 @@ static void cond_ibpb(struct task_struct *next)
 	}
 }

-#ifdef CONFIG_PERF_EVENTS
-static inline void cr4_update_pce_mm(struct mm_struct *mm)
-{
-	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
-	     atomic_read(&mm->context.perf_rdpmc_allowed))) {
-		/*
-		 * Clear the existing dirty counters to
-		 * prevent the leak for an RDPMC task.
-		 */
-		perf_clear_dirty_counters();
-		cr4_set_bits_irqsoff(X86_CR4_PCE);
-	} else
-		cr4_clear_bits_irqsoff(X86_CR4_PCE);
-}
-
-void cr4_update_pce(void *ignored)
-{
-	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
-#else
-static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
-#endif
-
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
@@ -581,10 +556,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	this_cpu_write(cpu_tlbstate.loaded_mm, next);
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

-	if (next != real_prev) {
-		cr4_update_pce_mm(next);
+	if (next != real_prev)
 		switch_ldt(real_prev, next);
-	}
 }

 /*
--
2.27.0

Thread overview: 14+ messages
2021-07-28 23:02 [RFC 0/3] perf/x86: Rework RDPMC access handling Rob Herring
2021-07-28 23:02 ` [RFC 1/3] x86: perf: Move RDPMC event flag to a common definition Rob Herring
2021-07-28 23:02 ` Rob Herring [this message]
2021-08-12 16:50   ` [RFC 2/3] perf/x86: Control RDPMC access from .enable() hook Andy Lutomirski
2021-08-12 18:16     ` Rob Herring
2021-08-26 18:13       ` Andy Lutomirski
2021-08-26 19:09         ` Rob Herring
2021-08-27 21:10           ` Andy Lutomirski
2021-08-30  3:05             ` Vince Weaver
2021-08-30  8:51               ` Peter Zijlstra
2021-08-30 20:21                 ` Vince Weaver
2021-08-30 21:40                   ` Rob Herring
2021-08-30 20:58               ` Rob Herring
2021-07-28 23:02 ` [RFC 3/3] perf/x86: Call mmap event callbacks on event's CPU Rob Herring
