From: Peter Zijlstra <peterz@infradead.org>
To: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: acme@kernel.org, alexander.shishkin@linux.intel.com,
jolsa@redhat.com, namhyung@kernel.org, songliubraving@fb.com,
eranian@google.com, alexey.budankov@linux.intel.com,
ak@linux.intel.com, mark.rutland@arm.com, megha.dey@intel.com,
frederic@kernel.org, maddy@linux.ibm.com, irogers@google.com,
kim.phillips@amd.com, linux-kernel@vger.kernel.org,
santosh.shukla@amd.com
Subject: Re: [RFC v2] perf: Rewrite core context handling
Date: Fri, 17 Jun 2022 15:36:51 +0200 [thread overview]
Message-ID: <YqyDc0i3C9OpNiPx@worktop.programming.kicks-ass.net> (raw)
In-Reply-To: <YqdLH+ZU/sf4n0pa@hirez.programming.kicks-ass.net>
On Mon, Jun 13, 2022 at 04:35:11PM +0200, Peter Zijlstra wrote:
> +/* XXX: No need of list now. Convert it to per-cpu variable */
> static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
Something like so I suppose...
---
include/linux/perf_event.h | 1
kernel/events/core.c | 70 ++++++++++++++-------------------------------
2 files changed, 22 insertions(+), 49 deletions(-)
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -936,7 +936,6 @@ struct perf_cpu_context {
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
- struct list_head cgrp_cpuctx_entry;
#endif
/*
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -829,55 +829,41 @@ perf_cgroup_set_timestamp(struct perf_cp
}
}
-/* XXX: No need of list now. Convert it to per-cpu variable */
-static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
-
/*
* reschedule events based on the cgroup constraint of task.
*/
static void perf_cgroup_switch(struct task_struct *task)
{
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(&cpu_context);
struct perf_cgroup *cgrp;
- struct perf_cpu_context *cpuctx, *tmp;
- struct list_head *list;
- unsigned long flags;
- /*
- * Disable interrupts and preemption to avoid this CPU's
- * cgrp_cpuctx_entry to change under us.
- */
- local_irq_save(flags);
-
cgrp = perf_cgroup_from_task(task, NULL);
- list = this_cpu_ptr(&cgrp_cpuctx_list);
- list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
- WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
- if (READ_ONCE(cpuctx->cgrp) == cgrp)
- continue;
-
- perf_ctx_lock(cpuctx, cpuctx->task_ctx);
- perf_ctx_disable(&cpuctx->ctx);
+ WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+ if (READ_ONCE(cpuctx->cgrp) == cgrp)
+ return;
- ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
- /*
- * must not be done before ctxswout due
- * to update_cgrp_time_from_cpuctx() in
- * ctx_sched_out()
- */
- cpuctx->cgrp = cgrp;
- /*
- * set cgrp before ctxsw in to allow
- * perf_cgroup_set_timestamp() in ctx_sched_in()
- * to not have to pass task around
- */
- ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+ perf_ctx_disable(&cpuctx->ctx);
- perf_ctx_enable(&cpuctx->ctx);
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
- }
+ ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
+ /*
+ * must not be done before ctxswout due
+ * to update_cgrp_time_from_cpuctx() in
+ * ctx_sched_out()
+ */
+ cpuctx->cgrp = cgrp;
+ /*
+ * set cgrp before ctxsw in to allow
+ * perf_cgroup_set_timestamp() in ctx_sched_in()
+ * to not have to pass task around
+ */
+ ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
- local_irq_restore(flags);
+ perf_ctx_enable(&cpuctx->ctx);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
static int perf_cgroup_ensure_storage(struct perf_event *event,
@@ -979,8 +965,6 @@ perf_cgroup_event_enable(struct perf_eve
return;
cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
- list_add(&cpuctx->cgrp_cpuctx_entry,
- per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
}
static inline void
@@ -1001,7 +985,6 @@ perf_cgroup_event_disable(struct perf_ev
return;
cpuctx->cgrp = NULL;
- list_del(&cpuctx->cgrp_cpuctx_entry);
}
#else /* !CONFIG_CGROUP_PERF */
@@ -2372,11 +2355,7 @@ static void perf_remove_from_context(str
* event_function_call() user.
*/
raw_spin_lock_irq(&ctx->lock);
- /*
- * Cgroup events are per-cpu events, and must IPI because of
- * cgrp_cpuctx_list.
- */
- if (!ctx->is_active && !is_cgroup_event(event)) {
+ if (!ctx->is_active) {
__perf_remove_from_context(event, this_cpu_ptr(&cpu_context),
ctx, (void *)flags);
raw_spin_unlock_irq(&ctx->lock);
@@ -2807,8 +2786,6 @@ perf_install_in_context(struct perf_even
* perf_event_attr::disabled events will not run and can be initialized
* without IPI. Except when this is the first event for the context, in
* that case we need the magic of the IPI to set ctx->is_active.
- * Similarly, cgroup events for the context also needs the IPI to
- * manipulate the cgrp_cpuctx_list.
*
* The IOC_ENABLE that is sure to follow the creation of a disabled
* event will issue the IPI and reprogram the hardware.
@@ -13301,9 +13278,6 @@ static void __init perf_event_init_all_c
INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
-#ifdef CONFIG_CGROUP_PERF
- INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
-#endif
INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
cpuctx = per_cpu_ptr(&cpu_context, cpu);
next prev parent reply other threads:[~2022-06-17 13:37 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-01-13 13:47 [RFC v2] perf: Rewrite core context handling Ravi Bangoria
2022-01-17 7:18 ` [perf] f7cf7134e4: WARNING:at_kernel/events/core.c:#__pmu_ctx_sched_out kernel test robot
2022-01-31 4:43 ` [RFC v2] perf: Rewrite core context handling Ravi Bangoria
2022-06-13 14:35 ` Peter Zijlstra
2022-06-13 14:36 ` Peter Zijlstra
2022-06-13 14:38 ` Peter Zijlstra
2022-08-02 6:11 ` Ravi Bangoria
2022-08-22 15:29 ` Peter Zijlstra
2022-08-22 15:43 ` Peter Zijlstra
2022-08-22 16:37 ` Ravi Bangoria
2022-08-23 4:20 ` Ravi Bangoria
2022-08-29 3:54 ` Ravi Bangoria
2022-08-23 6:30 ` Peter Zijlstra
2022-08-29 4:00 ` Ravi Bangoria
2022-08-29 11:58 ` Peter Zijlstra
2022-08-22 16:52 ` Peter Zijlstra
2022-08-23 4:57 ` Ravi Bangoria
2022-06-13 14:41 ` Peter Zijlstra
2022-08-22 14:38 ` Ravi Bangoria
2022-06-13 14:43 ` Peter Zijlstra
2022-08-02 6:16 ` Ravi Bangoria
2022-08-23 8:57 ` Peter Zijlstra
2022-08-24 5:07 ` Ravi Bangoria
2022-08-24 7:27 ` Peter Zijlstra
2022-08-24 7:53 ` Ravi Bangoria
2022-06-13 14:55 ` Peter Zijlstra
2022-08-02 6:10 ` Ravi Bangoria
2022-08-22 16:44 ` Peter Zijlstra
2022-08-23 4:46 ` Ravi Bangoria
2022-06-17 13:36 ` Peter Zijlstra [this message]
2022-08-24 10:13 ` Peter Zijlstra
2022-06-27 4:18 ` Ravi Bangoria
2022-08-02 6:06 ` Ravi Bangoria
2022-08-24 12:15 ` Peter Zijlstra
2022-08-24 14:59 ` Peter Zijlstra
2022-08-25 5:39 ` Ravi Bangoria
2022-08-25 9:17 ` Peter Zijlstra
2022-08-25 11:03 ` Ravi Bangoria
2022-08-02 6:13 ` Ravi Bangoria
2022-08-23 7:10 ` Peter Zijlstra
2022-08-02 6:17 ` Ravi Bangoria
2022-08-23 7:26 ` Peter Zijlstra
2022-08-23 15:14 ` Ravi Bangoria
2022-08-22 14:40 ` Ravi Bangoria
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=YqyDc0i3C9OpNiPx@worktop.programming.kicks-ass.net \
--to=peterz@infradead.org \
--cc=acme@kernel.org \
--cc=ak@linux.intel.com \
--cc=alexander.shishkin@linux.intel.com \
--cc=alexey.budankov@linux.intel.com \
--cc=eranian@google.com \
--cc=frederic@kernel.org \
--cc=irogers@google.com \
--cc=jolsa@redhat.com \
--cc=kim.phillips@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=maddy@linux.ibm.com \
--cc=mark.rutland@arm.com \
--cc=megha.dey@intel.com \
--cc=namhyung@kernel.org \
--cc=ravi.bangoria@amd.com \
--cc=santosh.shukla@amd.com \
--cc=songliubraving@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).