* [PATCH 0/3] more cpu hotplug fail
@ 2017-04-18 10:32 Peter Zijlstra
  2017-04-18 10:32 ` [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code Peter Zijlstra
                   ` (2 more replies)
  0 siblings, 3 replies; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 10:32 UTC (permalink / raw)
  To: jbaron, rostedt, tglx, mingo, bigeasy, peterz; +Cc: linux-kernel

Now that the CPU hotplug lock is a regular per-cpu rwsem, its read side can
no longer be nested.

Fix two such sites in perf.
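
Purely for illustration, a sketch of the failure mode; the function below is
made up, only the lock calls are real:

#include <linux/cpu.h>

/* Illustrative sketch only -- no such nesting site exists verbatim. */
static void nested_read_side(void)
{
	get_online_cpus();	/* outer read section on cpu_hotplug_lock */
	/*
	 * If a hotplug writer queues up at this point, the nested read
	 * below blocks behind it, while the writer waits for the outer
	 * read section to end: deadlock on a per-cpu rwsem.
	 */
	get_online_cpus();	/* nested read section -> can deadlock */
	put_online_cpus();
	put_online_cpus();
}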


* [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code
  2017-04-18 10:32 [PATCH 0/3] more cpu hotplug fail Peter Zijlstra
@ 2017-04-18 10:32 ` Peter Zijlstra
  2017-04-20 11:29   ` [tip:smp/hotplug] " tip-bot for Peter Zijlstra (Intel)
  2017-04-21 16:08   ` [PATCH 1/3] " Jason Baron
  2017-04-18 10:32 ` [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp() Peter Zijlstra
  2017-04-18 10:32 ` [PATCH 3/3] perf: Avoid cpu_hotplug_lock r-r recursion Peter Zijlstra
  2 siblings, 2 replies; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 10:32 UTC (permalink / raw)
  To: jbaron, rostedt, tglx, mingo, bigeasy, peterz; +Cc: linux-kernel

[-- Attachment #1: peterz-jump_label-pull-up-hotplug.patch --]
[-- Type: text/plain, Size: 3680 bytes --]

This change does two things: it moves the get_online_cpus() call into
generic code, with the aim of later providing some static_key ops that
avoid it.

As a side effect it inverts the lock order between cpu_hotplug_lock
and jump_label_mutex.
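
Roughly, the resulting nesting (a sketch derived from the hunks below, not
an exact call chain):

/*
 * static_key_slow_inc()
 *   get_online_cpus()              // cpu_hotplug_lock, read side
 *     jump_label_lock()            // jump_label_mutex
 *       jump_label_update(key)
 *         arch_jump_label_transform()
 *           mutex_lock(&text_mutex)
 */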

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct ju
 		insn.word = 0; /* nop */
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
 		insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct ju
 			   (unsigned long)insn_p + sizeof(*insn_p));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct ju
 		val = 0x01000000;
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	*insn = val;
 	flushi(insn);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struc
 void arch_jump_label_transform(struct jump_entry *e,
 				enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 
 	__jump_label_transform(e, type);
 	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struc
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 static enum {
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
 #include <linux/bug.h>
+#include <linux/cpu.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,12 @@ void static_key_slow_inc(struct static_k
 			return;
 	}
 
+	/*
+	 * A number of architectures need to synchronize I$ across
+	 * the all CPUs, for that to be serialized against CPU hot-plug
+	 * we need to avoid CPUs coming online.
+	 */
+	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -133,6 +140,7 @@ void static_key_slow_inc(struct static_k
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
@@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
+	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct
 		jump_label_update(key);
 	}
 	jump_label_unlock();
+	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
@@ -592,6 +602,10 @@ jump_label_module_notify(struct notifier
 
 	switch (val) {
 	case MODULE_STATE_COMING:
+		/*
+		 * XXX do we need get_online_cpus() ?  the module isn't
+		 * executable yet, so nothing should be looking at our code.
+		 */
 		jump_label_lock();
 		ret = jump_label_add_module(mod);
 		if (ret) {


* [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 10:32 [PATCH 0/3] more cpu hotplug fail Peter Zijlstra
  2017-04-18 10:32 ` [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code Peter Zijlstra
@ 2017-04-18 10:32 ` Peter Zijlstra
  2017-04-18 13:03   ` Peter Zijlstra
  2017-04-20 11:29   ` [tip:smp/hotplug] jump_label: Provide static_key_slow_inc_cpuslocked() tip-bot for Peter Zijlstra (Intel)
  2017-04-18 10:32 ` [PATCH 3/3] perf: Avoid cpu_hotplug_lock r-r recursion Peter Zijlstra
  2 siblings, 2 replies; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 10:32 UTC (permalink / raw)
  To: jbaron, rostedt, tglx, mingo, bigeasy, peterz; +Cc: linux-kernel

[-- Attachment #1: peterz-jump_label-expose-hotplug.patch --]
[-- Type: text/plain, Size: 3197 bytes --]

Provide static_key_slow_inc_nohp(), a variant that does not take
cpu_hotplug_lock; the caller is expected to already hold it.

XXX maybe add an assertion that it is held.
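
A sketch of the intended calling convention (the key and function below are
hypothetical, not part of this series):

#include <linux/cpu.h>
#include <linux/jump_label.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;	/* hypothetical */

static void example_enable(void)
{
	get_online_cpus();			/* hotplug lock already held ... */
	static_key_slow_inc_nohp(&example_key);	/* ... so don't take it again */
	put_online_cpus();
}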

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/jump_label.h |    3 +++
 kernel/jump_label.c        |   21 +++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -158,6 +158,7 @@ extern void arch_jump_label_transform_st
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_inc_nohp(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
@@ -213,6 +214,8 @@ static inline void static_key_slow_inc(s
 	atomic_inc(&key->enabled);
 }
 
+#define static_key_slow_inc_nohp static_key_slow_inc
+
 static inline void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -100,7 +100,7 @@ void static_key_disable(struct static_ke
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-void static_key_slow_inc(struct static_key *key)
+void __static_key_slow_inc(struct static_key *key)
 {
 	int v, v1;
 
@@ -129,7 +129,6 @@ void static_key_slow_inc(struct static_k
 	 * the all CPUs, for that to be serialized against CPU hot-plug
 	 * we need to avoid CPUs coming online.
 	 */
-	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -139,10 +138,22 @@ void static_key_slow_inc(struct static_k
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	get_online_cpus();
+	__static_key_slow_inc(key);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_slow_inc_nohp(struct static_key *key)
+{
+	__static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_slow_inc_nohp);
+
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
@@ -153,7 +164,6 @@ static void __static_key_slow_dec(struct
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
-	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -167,20 +177,23 @@ static void __static_key_slow_dec(struct
 		jump_label_update(key);
 	}
 	jump_label_unlock();
-	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
+	get_online_cpus();
 	__static_key_slow_dec(&key->key, 0, NULL);
+	put_online_cpus();
 }
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+	get_online_cpus();
 	__static_key_slow_dec(key, 0, NULL);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 


* [PATCH 3/3] perf: Avoid cpu_hotplug_lock r-r recursion
  2017-04-18 10:32 [PATCH 0/3] more cpu hotplug fail Peter Zijlstra
  2017-04-18 10:32 ` [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code Peter Zijlstra
  2017-04-18 10:32 ` [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp() Peter Zijlstra
@ 2017-04-18 10:32 ` Peter Zijlstra
  2017-04-20 11:30   ` [tip:smp/hotplug] " tip-bot for Peter Zijlstra (Intel)
  2 siblings, 1 reply; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 10:32 UTC (permalink / raw)
  To: jbaron, rostedt, tglx, mingo, bigeasy, peterz; +Cc: linux-kernel

[-- Attachment #1: peterz-perf-fix-hp-fail.patch --]
[-- Type: text/plain, Size: 925 bytes --]

There are two call-sites where using static_key results in recursing
on the cpu_hotplug_lock.

Use the hotplug-locked variant of static_key_slow_inc() instead.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7785,7 +7785,7 @@ static int perf_swevent_init(struct perf
 		if (err)
 			return err;
 
-		static_key_slow_inc(&perf_swevent_enabled[event_id]);
+		static_key_slow_inc_nohp(&perf_swevent_enabled[event_id]);
 		event->destroy = sw_perf_event_destroy;
 	}
 
@@ -9294,7 +9294,7 @@ static void account_event(struct perf_ev
 
 		mutex_lock(&perf_sched_mutex);
 		if (!atomic_read(&perf_sched_count)) {
-			static_branch_enable(&perf_sched_events);
+			static_key_slow_inc_nohp(&perf_sched_events.key);
 			/*
 			 * Guarantee that all CPUs observe they key change and
 			 * call the perf scheduling hooks before proceeding to


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 10:32 ` [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp() Peter Zijlstra
@ 2017-04-18 13:03   ` Peter Zijlstra
  2017-04-18 16:46     ` Steven Rostedt
  2017-04-20 11:29   ` [tip:smp/hotplug] jump_label: Provide static_key_slow_inc_cpuslocked() tip-bot for Peter Zijlstra (Intel)
  1 sibling, 1 reply; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 13:03 UTC (permalink / raw)
  To: jbaron, rostedt, tglx, mingo, bigeasy; +Cc: linux-kernel

On Tue, Apr 18, 2017 at 12:32:15PM +0200, Peter Zijlstra wrote:
> XXX maybe add an assertion that it is taken.

Compile tested only..

---
Subject: hotplug,lockdep: Verify the hotplug lock assumptions
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue Apr 18 14:49:09 CEST 2017

With the recent work to avoid recursive hotplug read locks, various code
paths grew the assumption that the hotplug lock is held.

Make sure to verify these assumptions to avoid future broken code.
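
For example, code that relies on the hotplug lock can now state and enforce
that in one line (hypothetical function, for illustration only):

#include <linux/cpu.h>

static void touch_per_cpu_state(void)
{
	lockdep_assert_hotplug_held();	/* caller must hold cpu_hotplug_lock */
	/* ... safe to walk/modify per-cpu state without racing hotplug ... */
}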

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/cpu.h   |    2 ++
 kernel/cpu.c          |    7 +++++++
 kernel/jump_label.c   |    2 ++
 kernel/padata.c       |    3 +--
 kernel/stop_machine.c |    2 ++
 5 files changed, 14 insertions(+), 2 deletions(-)

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -105,6 +105,7 @@ extern void cpu_hotplug_begin(void);
 extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
+extern void lockdep_assert_hotplug_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
@@ -118,6 +119,7 @@ static inline void cpu_hotplug_done(void
 #define put_online_cpus()	do { } while (0)
 #define cpu_hotplug_disable()	do { } while (0)
 #define cpu_hotplug_enable()	do { } while (0)
+static inline void lockdep_assert_hotplug_held(void) {}
 #endif		/* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -229,6 +229,11 @@ void cpu_hotplug_done(void)
 	percpu_up_write(&cpu_hotplug_lock);
 }
 
+void lockdep_assert_hotplug_held(void)
+{
+	percpu_rwsem_assert_held(&cpu_hotplug_lock);
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -1398,6 +1403,8 @@ int __cpuhp_setup_state_locked(enum cpuh
 	int cpu, ret = 0;
 	bool dynstate;
 
+	lockdep_assert_hotplug_held();
+
 	if (cpuhp_cb_check(state) || !name)
 		return -EINVAL;
 
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -130,6 +130,7 @@ void __static_key_slow_inc(struct static
 	 * the all CPUs, for that to be serialized against CPU hot-plug
 	 * we need to avoid CPUs coming online.
 	 */
+	lockdep_assert_hotplug_held();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -158,6 +159,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc_no
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	lockdep_assert_hotplug_held();
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
  *                         parallel workers.
  *
  * @wq: workqueue to use for the allocated padata instance
- *
- * Must be called from a get_online_cpus() protected region
  */
 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
 {
+	lockdep_assert_hotplug_held();
 	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -561,6 +561,8 @@ int stop_machine_locked(cpu_stop_fn_t fn
 		.active_cpus = cpus,
 	};
 
+	lockdep_assert_hotplug_held();
+
 	if (!stop_machine_initialized) {
 		/*
 		 * Handle the case where stop_machine() is called


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 13:03   ` Peter Zijlstra
@ 2017-04-18 16:46     ` Steven Rostedt
  2017-04-18 20:27       ` Peter Zijlstra
  0 siblings, 1 reply; 18+ messages in thread
From: Steven Rostedt @ 2017-04-18 16:46 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: jbaron, tglx, mingo, bigeasy, linux-kernel

On Tue, 18 Apr 2017 15:03:50 +0200
Peter Zijlstra <peterz@infradead.org> wrote:

> +++ b/kernel/padata.c
> @@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
>   *                         parallel workers.
>   *
>   * @wq: workqueue to use for the allocated padata instance
> - *
> - * Must be called from a get_online_cpus() protected region

Find the comment redundant?

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

-- Steve

>   */
>  struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
>  {
> +	lockdep_assert_hotplug_held();
>  	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
>  }
>  EXPORT_SYMBOL(padata_alloc_possible);
> --- a/kernel/stop_machine.c
> +++ b/kernel/stop_machine.c
> @@ -561,6 +561,8 @@ int stop_machine_locked(cpu_stop_fn_t fn
>  		.active_cpus = cpus,
>  	};
>  
> +	lockdep_assert_hotplug_held();
> +
>  	if (!stop_machine_initialized) {
>  		/*
>  		 * Handle the case where stop_machine() is called


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 16:46     ` Steven Rostedt
@ 2017-04-18 20:27       ` Peter Zijlstra
  2017-04-18 20:50         ` Thomas Gleixner
  0 siblings, 1 reply; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-18 20:27 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: jbaron, tglx, mingo, bigeasy, linux-kernel

On Tue, Apr 18, 2017 at 12:46:29PM -0400, Steven Rostedt wrote:
> On Tue, 18 Apr 2017 15:03:50 +0200
> Peter Zijlstra <peterz@infradead.org> wrote:
> 
> > +++ b/kernel/padata.c
> > @@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
> >   *                         parallel workers.
> >   *
> >   * @wq: workqueue to use for the allocated padata instance
> > - *
> > - * Must be called from a get_online_cpus() protected region
> 
> Find the comment redundant?

Once there's code that enforces it? Yes. Nobody reads comments
;-)


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 20:27       ` Peter Zijlstra
@ 2017-04-18 20:50         ` Thomas Gleixner
  2017-04-19  6:39           ` Peter Zijlstra
  0 siblings, 1 reply; 18+ messages in thread
From: Thomas Gleixner @ 2017-04-18 20:50 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Steven Rostedt, jbaron, mingo, bigeasy, linux-kernel

On Tue, 18 Apr 2017, Peter Zijlstra wrote:
> On Tue, Apr 18, 2017 at 12:46:29PM -0400, Steven Rostedt wrote:
> > On Tue, 18 Apr 2017 15:03:50 +0200
> > Peter Zijlstra <peterz@infradead.org> wrote:
> > 
> > > +++ b/kernel/padata.c
> > > @@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
> > >   *                         parallel workers.
> > >   *
> > >   * @wq: workqueue to use for the allocated padata instance
> > > - *
> > > - * Must be called from a get_online_cpus() protected region
> > 
> > Find the comment redundant?
> 
> Once there's code that enforces it? Yes. Nobody reads comments
> ;-)

Nobody enables lockdep either .....


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-18 20:50         ` Thomas Gleixner
@ 2017-04-19  6:39           ` Peter Zijlstra
  2017-04-19  9:08             ` Thomas Gleixner
  0 siblings, 1 reply; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-19  6:39 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: Steven Rostedt, jbaron, mingo, bigeasy, linux-kernel

On Tue, Apr 18, 2017 at 10:50:43PM +0200, Thomas Gleixner wrote:
> On Tue, 18 Apr 2017, Peter Zijlstra wrote:
> > On Tue, Apr 18, 2017 at 12:46:29PM -0400, Steven Rostedt wrote:
> > > On Tue, 18 Apr 2017 15:03:50 +0200
> > > Peter Zijlstra <peterz@infradead.org> wrote:
> > > 
> > > > +++ b/kernel/padata.c
> > > > @@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
> > > >   *                         parallel workers.
> > > >   *
> > > >   * @wq: workqueue to use for the allocated padata instance
> > > > - *
> > > > - * Must be called from a get_online_cpus() protected region
> > > 
> > > Find the comment redundant?
> > 
> > Once there's code that enforces it? Yes. Nobody reads comments
> > ;-)
> 
> Nobody enables lockdep either .....

In the grand scheme of things, true. But there are more people running
with lockdep enabled than there are people writing code, of which there
are more than people reading relevant comments while writing code.
Therefore having the lockdep annotation is two orders better than a
comment ;-)

Also, I would argue that an "assert" at the start of a function is a
fairly readable 'comment' all by itself.

In any case, I don't care too much. But I typically remove such comments
when I stick a lockdep_assert_held() in.
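
Something like this (hypothetical lock and function, purely illustrative):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(widget_mutex);	/* hypothetical lock */
static int widget_count;		/* protected by widget_mutex */

static void widget_count_inc(void)
{
	lockdep_assert_held(&widget_mutex);	/* readable *and* enforced */
	widget_count++;
}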


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-19  6:39           ` Peter Zijlstra
@ 2017-04-19  9:08             ` Thomas Gleixner
  2017-04-19 12:49               ` Steven Rostedt
  0 siblings, 1 reply; 18+ messages in thread
From: Thomas Gleixner @ 2017-04-19  9:08 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Steven Rostedt, jbaron, mingo, bigeasy, linux-kernel

On Wed, 19 Apr 2017, Peter Zijlstra wrote:
> On Tue, Apr 18, 2017 at 10:50:43PM +0200, Thomas Gleixner wrote:
> > On Tue, 18 Apr 2017, Peter Zijlstra wrote:
> > > On Tue, Apr 18, 2017 at 12:46:29PM -0400, Steven Rostedt wrote:
> > > > On Tue, 18 Apr 2017 15:03:50 +0200
> > > > Peter Zijlstra <peterz@infradead.org> wrote:
> > > > 
> > > > > +++ b/kernel/padata.c
> > > > > @@ -1008,11 +1008,10 @@ static struct padata_instance *padata_al
> > > > >   *                         parallel workers.
> > > > >   *
> > > > >   * @wq: workqueue to use for the allocated padata instance
> > > > > - *
> > > > > - * Must be called from a get_online_cpus() protected region
> > > > 
> > > > Find the comment redundant?
> > > 
> > > Once there's code that enforces it? Yes. Nobody reads comments
> > > ;-)
> > 
> > Nobody enables lockdep either .....
> 
> In the grand scheme of things, true. But there are more people running
> with lockdep enabled than there are people writing code, of which there
> are more than people reading relevant comments while writing code.
> Therefore having the lockdep annotation is two orders better than a
> comment ;-)
> 
> Also, I would argue that an "assert" at the start of a function is a
> fairly readable 'comment' all by itself.
> 
> In any case, I don't care too much. But I typically remove such comments
> when I stick a lockdep_assert_held() in.

I think that's wrong. We are striving for better documentation and the
kernel-doc comments above a function are part of that. Calling conventions
are definitely something which belongs there.

Thanks,

	tglx


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-19  9:08             ` Thomas Gleixner
@ 2017-04-19 12:49               ` Steven Rostedt
  2017-04-19 14:08                 ` Peter Zijlstra
  0 siblings, 1 reply; 18+ messages in thread
From: Steven Rostedt @ 2017-04-19 12:49 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: Peter Zijlstra, jbaron, mingo, bigeasy, linux-kernel

On Wed, 19 Apr 2017 11:08:35 +0200 (CEST)
Thomas Gleixner <tglx@linutronix.de> wrote:


> > In the grand scheme of things, true. But there are more people running
> > with lockdep enabled than there are people writing code, of which there
> > are more than people reading relevant comments while writing code.
> > Therefore having the lockdep annotation is two orders better than a
> > comment ;-)
> > 
> > Also, I would argue that an "assert" at the start of a function is a
> > fairly readable 'comment' all by itself.
> > 
> > In any case, I don't care too much. But I typically remove such comments
> > when I stick a lockdep_assert_held() in.  
> 
> I think that's wrong. We are striving for better documentation and the
> kernel-doc comments above a function are part of that. Calling conventions
> are definitely something which belongs there.

I agree with Thomas. Removing the comment because a
"lockdep_assert_held()" exists at the top of the code, assumes someone
that is about to use that function did more than read the kerneldoc and
actually looked at the code.

If there's a kerneldoc to a function, then that header should contain
all the info that a developer needs to use that function.

-- Steve


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-19 12:49               ` Steven Rostedt
@ 2017-04-19 14:08                 ` Peter Zijlstra
  2017-04-19 14:21                   ` Steven Rostedt
  0 siblings, 1 reply; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-19 14:08 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: Thomas Gleixner, jbaron, mingo, bigeasy, linux-kernel

On Wed, Apr 19, 2017 at 08:49:11AM -0400, Steven Rostedt wrote:
> I agree with Thomas. Removing the comment because a
> "lockdep_assert_held()" exists at the top of the code, assumes someone
> that is about to use that function did more than read the kerneldoc and
> actually looked at the code.

No, it doesn't assume anything. You can use the function without reading
_any_ comments. It will explode at runtime (preferably on the machine of
the guy who wrote it -- who _SHOULD_ have lockdep enabled, but certainly
on other developers' machines and test-bots, who will then loudly yell
at said developer for not doing his job properly).

> If there's a kerneldoc to a function, then that header should contain
> all the info that a developer needs to use that function.

Yeah, rainbows and unicorns are shiny. Also, I put kerneldoc (if I put
it at all) at the definition site, not the declaration. So headers are
useless.


In any case, I don't mind the extra line of comment. Don't really see
the point of it either. What I am convinced of is that
lockdep_assert_held() lines are far more useful than such comment lines.


* Re: [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp()
  2017-04-19 14:08                 ` Peter Zijlstra
@ 2017-04-19 14:21                   ` Steven Rostedt
  0 siblings, 0 replies; 18+ messages in thread
From: Steven Rostedt @ 2017-04-19 14:21 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Thomas Gleixner, jbaron, mingo, bigeasy, linux-kernel

On Wed, 19 Apr 2017 16:08:04 +0200
Peter Zijlstra <peterz@infradead.org> wrote:


> Yeah, rainbows and unicorns are shiny. Also, I put kerneldoc (if I put
> it at all) at the definition site, not the declaration. So headers are
> useless.

What's wrong with rainbows and unicorns? We all have our ponies, and
some of them are unicorns.

https://www.slideshare.net/brendangregg/velocity-2015-linux-perf-tools/105

> 
> 
> In any case, I don't mind the extra line of comment. Don't really see
> the point of it either. What I am convinced of is that
> lockdep_assert_held() lines are far more useful than such comment lines.

I agree that the lockdep assert is more useful. But I disagree with
removing comments about required locks when the assertion is added. A
comment may save a developer an embarrassing moment of being yelled at
because they didn't test their code with lockdep enabled. And that is
useful.

-- Steve


* [tip:smp/hotplug] jump_label: Pull get_online_cpus() into generic code
  2017-04-18 10:32 ` [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code Peter Zijlstra
@ 2017-04-20 11:29   ` tip-bot for Peter Zijlstra (Intel)
  2017-04-21 16:08   ` [PATCH 1/3] " Jason Baron
  1 sibling, 0 replies; 18+ messages in thread
From: tip-bot for Peter Zijlstra (Intel) @ 2017-04-20 11:29 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: mingo, rostedt, peterz, linux-kernel, tglx, bigeasy, hpa

Commit-ID:  82947f31231157d8ab70fa8961f23fd3887a3327
Gitweb:     http://git.kernel.org/tip/82947f31231157d8ab70fa8961f23fd3887a3327
Author:     Peter Zijlstra (Intel) <peterz@infradead.org>
AuthorDate: Tue, 18 Apr 2017 19:05:03 +0200
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200

jump_label: Pull get_online_cpus() into generic code

This change does two things:

- it moves the get_online_cpus() call into generic code, with the aim of
  later providing some static_key ops that avoid it.

- as a side effect it inverts the lock order between cpu_hotplug_lock and
  jump_label_mutex.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: jbaron@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.590118425@infradead.org

---
 arch/mips/kernel/jump_label.c  |  2 --
 arch/sparc/kernel/jump_label.c |  2 --
 arch/tile/kernel/jump_label.c  |  2 --
 arch/x86/kernel/jump_label.c   |  2 --
 kernel/jump_label.c            | 14 ++++++++++++++
 5 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586da..32e3168 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 		insn.word = 0; /* nop */
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
 		insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 			   (unsigned long)insn_p + sizeof(*insn_p));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9..93adde1 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
 		val = 0x01000000;
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	*insn = val;
 	flushi(insn);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
index 07802d5..93931a4 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
 void arch_jump_label_transform(struct jump_entry *e,
 				enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 
 	__jump_label_transform(e, type);
 	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f..ab4f491 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 static enum {
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6c9cb20..f3afe07 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
 #include <linux/bug.h>
+#include <linux/cpu.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,12 @@ void static_key_slow_inc(struct static_key *key)
 			return;
 	}
 
+	/*
+	 * A number of architectures need to synchronize I$ across
+	 * the all CPUs, for that to be serialized against CPU hot-plug
+	 * we need to avoid CPUs coming online.
+	 */
+	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -133,6 +140,7 @@ void static_key_slow_inc(struct static_key *key)
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
@@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct static_key *key,
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
+	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct static_key *key,
 		jump_label_update(key);
 	}
 	jump_label_unlock();
+	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
@@ -592,6 +602,10 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 
 	switch (val) {
 	case MODULE_STATE_COMING:
+		/*
+		 * XXX do we need get_online_cpus() ?  the module isn't
+		 * executable yet, so nothing should be looking at our code.
+		 */
 		jump_label_lock();
 		ret = jump_label_add_module(mod);
 		if (ret) {


* [tip:smp/hotplug] jump_label: Provide static_key_slow_inc_cpuslocked()
  2017-04-18 10:32 ` [PATCH 2/3] jump_label: Provide static_key_slow_inc_nohp() Peter Zijlstra
  2017-04-18 13:03   ` Peter Zijlstra
@ 2017-04-20 11:29   ` tip-bot for Peter Zijlstra (Intel)
  1 sibling, 0 replies; 18+ messages in thread
From: tip-bot for Peter Zijlstra (Intel) @ 2017-04-20 11:29 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, bigeasy, peterz, tglx, rostedt, hpa, mingo

Commit-ID:  f5efc6fad63f5533a6083e95286920d5753e52bf
Gitweb:     http://git.kernel.org/tip/f5efc6fad63f5533a6083e95286920d5753e52bf
Author:     Peter Zijlstra (Intel) <peterz@infradead.org>
AuthorDate: Tue, 18 Apr 2017 19:05:04 +0200
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200

jump_label: Provide static_key_slow_inc_cpuslocked()

Provide static_key_slow_inc_cpuslocked(), a variant that doesn't take
cpu_hotplug_lock().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: jbaron@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.636958338@infradead.org

---
 include/linux/jump_label.h |  3 +++
 kernel/jump_label.c        | 21 +++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b..7d07f0b 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -158,6 +158,7 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
@@ -213,6 +214,8 @@ static inline void static_key_slow_inc(struct static_key *key)
 	atomic_inc(&key->enabled);
 }
 
+#define static_key_slow_inc_cpuslocked static_key_slow_inc
+
 static inline void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f3afe07..308b12e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -101,7 +101,7 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-void static_key_slow_inc(struct static_key *key)
+void __static_key_slow_inc(struct static_key *key)
 {
 	int v, v1;
 
@@ -130,7 +130,6 @@ void static_key_slow_inc(struct static_key *key)
 	 * the all CPUs, for that to be serialized against CPU hot-plug
 	 * we need to avoid CPUs coming online.
 	 */
-	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -140,10 +139,22 @@ void static_key_slow_inc(struct static_key *key)
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	get_online_cpus();
+	__static_key_slow_inc(key);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_slow_inc_cpuslocked(struct static_key *key)
+{
+	__static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_slow_inc_cpuslocked);
+
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
@@ -154,7 +165,6 @@ static void __static_key_slow_dec(struct static_key *key,
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
-	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -168,20 +178,23 @@ static void __static_key_slow_dec(struct static_key *key,
 		jump_label_update(key);
 	}
 	jump_label_unlock();
-	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
+	get_online_cpus();
 	__static_key_slow_dec(&key->key, 0, NULL);
+	put_online_cpus();
 }
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+	get_online_cpus();
 	__static_key_slow_dec(key, 0, NULL);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 


* [tip:smp/hotplug] perf: Avoid cpu_hotplug_lock r-r recursion
  2017-04-18 10:32 ` [PATCH 3/3] perf: Avoid cpu_hotplug_lock r-r recursion Peter Zijlstra
@ 2017-04-20 11:30   ` tip-bot for Peter Zijlstra (Intel)
  0 siblings, 0 replies; 18+ messages in thread
From: tip-bot for Peter Zijlstra (Intel) @ 2017-04-20 11:30 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: peterz, mingo, linux-kernel, rostedt, tglx, hpa, bigeasy

Commit-ID:  641693094ee1568502280f95900f374b2226b51d
Gitweb:     http://git.kernel.org/tip/641693094ee1568502280f95900f374b2226b51d
Author:     Peter Zijlstra (Intel) <peterz@infradead.org>
AuthorDate: Tue, 18 Apr 2017 19:05:05 +0200
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200

perf: Avoid cpu_hotplug_lock r-r recursion

There are two call-sites where using static_key results in recursing on the
cpu_hotplug_lock.

Use the hotplug locked version of static_key_slow_inc().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: jbaron@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.687248115@infradead.org

---
 kernel/events/core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 634dd95..8aa3063 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7653,7 +7653,7 @@ static int perf_swevent_init(struct perf_event *event)
 		if (err)
 			return err;
 
-		static_key_slow_inc(&perf_swevent_enabled[event_id]);
+		static_key_slow_inc_cpuslocked(&perf_swevent_enabled[event_id]);
 		event->destroy = sw_perf_event_destroy;
 	}
 
@@ -9160,7 +9160,7 @@ static void account_event(struct perf_event *event)
 
 		mutex_lock(&perf_sched_mutex);
 		if (!atomic_read(&perf_sched_count)) {
-			static_branch_enable(&perf_sched_events);
+			static_key_slow_inc_cpuslocked(&perf_sched_events.key);
 			/*
 			 * Guarantee that all CPUs observe they key change and
 			 * call the perf scheduling hooks before proceeding to


* Re: [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code
  2017-04-18 10:32 ` [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code Peter Zijlstra
  2017-04-20 11:29   ` [tip:smp/hotplug] " tip-bot for Peter Zijlstra (Intel)
@ 2017-04-21 16:08   ` Jason Baron
  2017-04-21 16:20     ` Peter Zijlstra
  1 sibling, 1 reply; 18+ messages in thread
From: Jason Baron @ 2017-04-21 16:08 UTC (permalink / raw)
  To: Peter Zijlstra, rostedt, tglx, mingo, bigeasy; +Cc: linux-kernel

On 04/18/2017 06:32 AM, Peter Zijlstra wrote:
> This change does two things; it moves the get_online_cpus() call into
> generic code, with the aim of later providing some static_key ops that
> avoid it.
>
> And as a side effect it inverts the relation between cpu_hotplug_lock
> and jump_label_mutex.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---

...

> @@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct
>  	 * returns is unbalanced, because all other static_key_slow_inc()
>  	 * instances block while the update is in progress.
>  	 */
> +	get_online_cpus();
>  	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
>  		WARN(atomic_read(&key->enabled) < 0,
>  		     "jump label: negative count!\n");

So the get and put can be unbalanced here since the above:

'if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))'

is followed by 'return;'. However, I see that the next patch removes 
this and so things are balanced again...


> @@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct
>  		jump_label_update(key);
>  	}
>  	jump_label_unlock();
> +	put_online_cpus();
>  }
>
>  static void jump_label_update_timeout(struct work_struct *work)
> @@ -592,6 +602,10 @@ jump_label_module_notify(struct notifier
>
>  	switch (val) {
>  	case MODULE_STATE_COMING:
> +		/*
> +		 * XXX do we need get_online_cpus() ?  the module isn't
> +		 * executable yet, so nothing should be looking at our code.
> +		 */

Since we're just updating the table of places we potentially need to
patch, but not actually doing any patching, we should not need
get_online_cpus() here... so in an attempt to reduce confusion I would
remove this.

Thanks,

-Jason


* Re: [PATCH 1/3] jump_label: Pull get_online_cpus() into generic code
  2017-04-21 16:08   ` [PATCH 1/3] " Jason Baron
@ 2017-04-21 16:20     ` Peter Zijlstra
  0 siblings, 0 replies; 18+ messages in thread
From: Peter Zijlstra @ 2017-04-21 16:20 UTC (permalink / raw)
  To: Jason Baron; +Cc: rostedt, tglx, mingo, bigeasy, linux-kernel

On Fri, Apr 21, 2017 at 12:08:26PM -0400, Jason Baron wrote:
> On 04/18/2017 06:32 AM, Peter Zijlstra wrote:
> > @@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct
> >  	 * returns is unbalanced, because all other static_key_slow_inc()
> >  	 * instances block while the update is in progress.
> >  	 */
> > +	get_online_cpus();
> >  	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
> >  		WARN(atomic_read(&key->enabled) < 0,
> >  		     "jump label: negative count!\n");
> 
> So the get and put can be unbalanced here since the above:
> 
> 'if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))'
> 
> is followed by 'return;'. However, I see that the next patch removes this
> and so things are balanced again...

Duh.. right you are.

> > @@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct
> >  		jump_label_update(key);
> >  	}
> >  	jump_label_unlock();
> > +	put_online_cpus();
> >  }
> > 
> >  static void jump_label_update_timeout(struct work_struct *work)
> > @@ -592,6 +602,10 @@ jump_label_module_notify(struct notifier
> > 
> >  	switch (val) {
> >  	case MODULE_STATE_COMING:
> > +		/*
> > +		 * XXX do we need get_online_cpus() ?  the module isn't
> > +		 * executable yet, so nothing should be looking at our code.
> > +		 */
> 
> Since we're just updating the table of places we potentially need to patch,
> but not actually doing any patching, we should not need get_online_cpus()
> here...so in attempt to reduce confusion I would remove this.

Thanks for confirming it is indeed not required. Will make it go away.


