linux-kernel.vger.kernel.org archive mirror
* [PATCH v2] perf/core: set cgroup in cpu contexts for new cgroup events
From: David Carrillo-Cisneros @ 2016-08-02  7:48 UTC (permalink / raw)
  To: linux-kernel
  Cc: x86, Ingo Molnar, Thomas Gleixner, Andi Kleen, Kan Liang,
	Peter Zijlstra, Vegard Nossum, Paul Turner, Stephane Eranian,
	David Carrillo-Cisneros

There is an optimization in perf_cgroup_sched_{in,out} that skips the
switch of cgroup events if the old and new cgroups in a task switch are
the same. This optimization interacts with the current code in two ways
that cause a cpu context's cgroup (cpuctx->cgrp) to be NULL even if a
cgroup event matches the current task. These are:

  1. On creation of the first cgroup event in a CPU: In the current code,
  cpuctx->cgrp is only set in perf_cgroup_sched_in, but due to the
  aforesaid optimization, perf_cgroup_sched_in will not run until the
  next cgroup switch in that CPU. This may happen late or never happen,
  depending on the system's number of cgroups, CPU load, etc.

  2. On deletion of the last cgroup event in a cpuctx: In list_del_event,
  cpuctx->cgrp is set to NULL. No new cgroup event will be sched in,
  because cpuctx->cgrp == NULL, until a cgroup switch occurs and
  perf_cgroup_sched_in is executed (updating cpuctx->cgrp).
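
For context, the skip in question looks roughly like this (a paraphrased
sketch of the perf_cgroup_sched_out() path, not an exact copy of the
kernel source; perf_cgroup_sched_in() performs the analogous check):

  static inline void perf_cgroup_sched_out(struct task_struct *task,
                                           struct task_struct *next)
  {
          struct perf_cgroup *cgrp1;
          struct perf_cgroup *cgrp2;

          rcu_read_lock();
          cgrp1 = perf_cgroup_from_task(task, NULL);
          cgrp2 = perf_cgroup_from_task(next, NULL);

          /*
           * The optimization: only switch out cgroup events if the task's
           * cgroup actually changes. When it does not, perf_cgroup_switch()
           * is skipped and cpuctx->cgrp is never refreshed on this path.
           */
          if (cgrp1 != cgrp2)
                  perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
          rcu_read_unlock();
  }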

This patch fixes both problems by setting cpuctx->cgrp in list_add_event,
mirroring what list_del_event does when removing a cgroup event from CPU
context, as introduced in:
commit 68cacd29167b ("perf_events: Fix stale ->cgrp pointer in
update_cgrp_time_from_cpuctx()")

With this patch, cpuctx->cgrp is always set/cleared when installing/removing
the first/last cgroup event in/from the CPU context. With cpuctx->cgrp
correctly set, event_filter_match works as intended when events are
sched in/out.
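
The cgroup part of that filter is essentially the following check (a
simplified sketch of perf_cgroup_match(), which event_filter_match() relies
on; not an exact copy of the kernel source):

  static inline bool perf_cgroup_match(struct perf_event *event)
  {
          struct perf_event_context *ctx = event->ctx;
          struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

          /* Non-cgroup events always match. */
          if (!event->cgrp)
                  return true;

          /*
           * A cgroup event can only match if the CPU context is currently
           * associated with a cgroup -- the cpuctx->cgrp that this patch
           * keeps up to date at event add/remove time.
           */
          if (!cpuctx->cgrp)
                  return false;

          /*
           * Cgroup scoping is hierarchical: an event for a cgroup also
           * counts for all of that cgroup's descendants.
           */
          return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
                                      event->cgrp->css.cgroup);
  }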

The problem is easy to observe on a machine with only one cgroup:

  $ perf stat -e cycles -I 1000 -C 0 -G /
  #          time             counts unit events
      1.000161699      <not counted>      cycles                    /
      2.000355591      <not counted>      cycles                    /
      3.000565154      <not counted>      cycles                    /
      4.000951350      <not counted>      cycles                    /

After the fix, the output is as expected:

  $ perf stat -e cycles -I 1000 -a -G /
  #         time             counts unit events
     1.004699159          627342882      cycles                    /
     2.007397156          615272690      cycles                    /
     3.010019057          616726074      cycles                    /

Rebased onto peterz/queue/perf/core.

Changes in v2:
  - Fix build error when CONFIG_CGROUP_PERF is not set.
  - Unify the add and del cases into list_update_cgroup_event.
  - Remove cgroup-exclusive variables from builds
    without CONFIG_CGROUP_PERF.

Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
 include/linux/perf_event.h |  4 ++++
 kernel/events/core.c       | 54 ++++++++++++++++++++++++++++++----------------
 2 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7921f4f..0e97ae2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -730,7 +730,9 @@ struct perf_event_context {
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
+#ifdef CONFIG_CGROUP_PERF
 	int				nr_cgroups;	 /* cgroup evts */
+#endif
 	void				*task_ctx_data; /* pmu specific data */
 	struct rcu_head			rcu_head;
 };
@@ -756,7 +758,9 @@ struct perf_cpu_context {
 	unsigned int			hrtimer_active;
 
 	struct pmu			*unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
 	struct perf_cgroup		*cgrp;
+#endif
 };
 
 struct perf_output_handle {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9345028..8c07142 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 		}
 	}
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
+		return;
+
+	if (add && ctx->nr_cgroups++)
+		return;
+	else if (!add && --ctx->nr_cgroups)
+		return;
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * this will always be called from the right CPU.
+	 */
+	cpuctx = __get_cpu_context(ctx);
+	cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 			 struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		list_add_tail(&event->group_entry, list);
 	}
 
-	if (is_cgroup_event(event))
-		ctx->nr_cgroups++;
+	list_update_cgroup_event(event, ctx, true);
 
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-	struct perf_cpu_context *cpuctx;
-
 	WARN_ON_ONCE(event->ctx != ctx);
 	lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	if (is_cgroup_event(event)) {
-		ctx->nr_cgroups--;
-		/*
-		 * Because cgroup events are always per-cpu events, this will
-		 * always be called from the right CPU.
-		 */
-		cpuctx = __get_cpu_context(ctx);
-		/*
-		 * If there are no more cgroup events then clear cgrp to avoid
-		 * stale pointer in update_cgrp_time_from_cpuctx().
-		 */
-		if (!ctx->nr_cgroups)
-			cpuctx->cgrp = NULL;
-	}
+	list_update_cgroup_event(event, ctx, false);
 
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
-- 
2.8.0.rc3.226.g39d4020


* Re: [PATCH v2] perf/core: set cgroup in cpu contexts for new cgroup events
From: Vegard Nossum @ 2016-08-03 15:46 UTC (permalink / raw)
  To: David Carrillo-Cisneros
  Cc: LKML, x86, Ingo Molnar, Thomas Gleixner, Andi Kleen, Kan Liang,
	Peter Zijlstra, Paul Turner, Stephane Eranian

Hi,

Is this supposed to fix the bug I reported here?

https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg1197757.html

If so, you may want to add (see the sketch below):

1) Fixes: f2fb6bef92514 ("perf/core: Optimize side-band event delivery")
2) Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
3) a link to the thread

and I can give it a test to see if it fixes the problem I was running into.
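
Assuming the usual trailer format, the three together would look something
like this at the end of the changelog (the Link: target here is simply the
mail-archive URL above, used as a stand-in for the thread link):

  Fixes: f2fb6bef92514 ("perf/core: Optimize side-band event delivery")
  Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
  Link: https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg1197757.html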

If not, please ignore :-)

Thanks,


Vegard


* Re: [PATCH v2] perf/core: set cgroup in cpu contexts for new cgroup events
From: David Carrillo-Cisneros @ 2016-08-03 18:51 UTC (permalink / raw)
  To: Vegard Nossum
  Cc: LKML, x86, Ingo Molnar, Thomas Gleixner, Andi Kleen, Kan Liang,
	Peter Zijlstra, Paul Turner, Stephane Eranian

Hi Vegard,

I don't think this patch fixes your bug, but it touches some code that
may be related.

David



* Re: [PATCH v2] perf/core: set cgroup in cpu contexts for new cgroup events
From: Peter Zijlstra @ 2016-08-03 21:49 UTC (permalink / raw)
  To: Vegard Nossum
  Cc: David Carrillo-Cisneros, LKML, x86, Ingo Molnar, Thomas Gleixner,
	Andi Kleen, Kan Liang, Paul Turner, Stephane Eranian

On Wed, Aug 03, 2016 at 05:46:08PM +0200, Vegard Nossum wrote:
> Is this supposed to fix the bug I reported here?
> 
> https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg1197757.html
> 

Like David said, unlikely.

I only recently got back from holidays so please allow me a little time
to sort through the mailbox and get going again. I'll try and sort this
'soon'.

Thanks!


* [tip:perf/core] perf/core: Set cgroup in CPU contexts for new cgroup events
From: tip-bot for David Carrillo-Cisneros @ 2016-08-10 17:56 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: hpa, pjt, kan.liang, acme, eranian, alexander.shishkin,
	linux-kernel, davidcc, jolsa, tglx, torvalds, peterz, mingo,
	vegard.nossum, vincent.weaver

Commit-ID:  db4a835601b73cf8d6cd8986381d966b8e13d2d9
Gitweb:     http://git.kernel.org/tip/db4a835601b73cf8d6cd8986381d966b8e13d2d9
Author:     David Carrillo-Cisneros <davidcc@google.com>
AuthorDate: Tue, 2 Aug 2016 00:48:12 -0700
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 10 Aug 2016 13:05:52 +0200

perf/core: Set cgroup in CPU contexts for new cgroup events

There's a perf stat bug that is easy to observe on a machine with only one cgroup:

  $ perf stat -e cycles -I 1000 -C 0 -G /
  #          time             counts unit events
      1.000161699      <not counted>      cycles                    /
      2.000355591      <not counted>      cycles                    /
      3.000565154      <not counted>      cycles                    /
      4.000951350      <not counted>      cycles                    /

We'd expect some output there.

The underlying problem is that there is an optimization in
perf_cgroup_sched_{in,out}() that skips the switch of cgroup events
if the old and new cgroups in a task switch are the same.

This optimization interacts with the current code in two ways
that cause a CPU context's cgroup (cpuctx->cgrp) to be NULL even if a
cgroup event matches the current task. These are:

  1. On creation of the first cgroup event in a CPU: In the current code,
  cpuctx->cgrp is only set in perf_cgroup_sched_in, but due to the
  aforesaid optimization, perf_cgroup_sched_in will not run until the
  next cgroup switch in that CPU. This may happen late or never happen,
  depending on the system's number of cgroups, CPU load, etc.

  2. On deletion of the last cgroup event in a cpuctx: In list_del_event,
  cpuctx->cgrp is set to NULL. No new cgroup event will be sched in,
  because cpuctx->cgrp == NULL, until a cgroup switch occurs and
  perf_cgroup_sched_in is executed (updating cpuctx->cgrp).

This patch fixes both problems by setting cpuctx->cgrp in list_add_event,
mirroring what list_del_event does when removing a cgroup event from CPU
context, as introduced in:

  commit 68cacd29167b ("perf_events: Fix stale ->cgrp pointer in update_cgrp_time_from_cpuctx()")

With this patch, cpuctx->cgrp is always set/cleared when installing/removing
the first/last cgroup event in/from the CPU context. With cpuctx->cgrp
correctly set, event_filter_match works as intended when events are
sched in/out.

After the fix, the output is as expected:

  $ perf stat -e cycles -I 1000 -a -G /
  #         time             counts unit events
     1.004699159          627342882      cycles                    /
     2.007397156          615272690      cycles                    /
     3.010019057          616726074      cycles                    /

Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vegard Nossum <vegard.nossum@gmail.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1470124092-113192-1-git-send-email-davidcc@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/perf_event.h |  4 ++++
 kernel/events/core.c       | 54 ++++++++++++++++++++++++++++++----------------
 2 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8ed43261..2b6b43c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -743,7 +743,9 @@ struct perf_event_context {
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
+#ifdef CONFIG_CGROUP_PERF
 	int				nr_cgroups;	 /* cgroup evts */
+#endif
 	void				*task_ctx_data; /* pmu specific data */
 	struct rcu_head			rcu_head;
 };
@@ -769,7 +771,9 @@ struct perf_cpu_context {
 	unsigned int			hrtimer_active;
 
 	struct pmu			*unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
 	struct perf_cgroup		*cgrp;
+#endif
 };
 
 struct perf_output_handle {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 87d02b8..1903b8f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 		}
 	}
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
+		return;
+
+	if (add && ctx->nr_cgroups++)
+		return;
+	else if (!add && --ctx->nr_cgroups)
+		return;
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * this will always be called from the right CPU.
+	 */
+	cpuctx = __get_cpu_context(ctx);
+	cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 			 struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		list_add_tail(&event->group_entry, list);
 	}
 
-	if (is_cgroup_event(event))
-		ctx->nr_cgroups++;
+	list_update_cgroup_event(event, ctx, true);
 
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-	struct perf_cpu_context *cpuctx;
-
 	WARN_ON_ONCE(event->ctx != ctx);
 	lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	if (is_cgroup_event(event)) {
-		ctx->nr_cgroups--;
-		/*
-		 * Because cgroup events are always per-cpu events, this will
-		 * always be called from the right CPU.
-		 */
-		cpuctx = __get_cpu_context(ctx);
-		/*
-		 * If there are no more cgroup events then clear cgrp to avoid
-		 * stale pointer in update_cgrp_time_from_cpuctx().
-		 */
-		if (!ctx->nr_cgroups)
-			cpuctx->cgrp = NULL;
-	}
+	list_update_cgroup_event(event, ctx, false);
 
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)

