* [patch 01/40] smpboot: Allow selfparking per cpu threads
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:29   ` Paul E. McKenney
  2013-02-14 17:46   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  2013-01-31 12:11 ` [patch 02/40] stop_machine: Store task reference in a separate per cpu variable Thomas Gleixner
                   ` (40 subsequent siblings)
  41 siblings, 2 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: smpboot-allow-selfstopping-threads.patch --]
[-- Type: text/plain, Size: 2510 bytes --]

The stop machine threads are still killed when a cpu goes offline. The
reason is that the thread is used to bring the cpu down, so it can't
be parked along with the other per cpu threads.

Allow a per cpu thread to be excluded from automatic parking, so it
can park itself once it's done.

Add a create callback function as well.
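
For illustration only, a client of the new interface might look
roughly like this (all names below are made up; the real conversion of
the stopper thread follows later in this series):

	static DEFINE_PER_CPU(struct task_struct *, example_task);

	static struct smp_hotplug_thread example_threads = {
		.store			= &example_task,
		.thread_should_run	= example_should_run,
		.thread_fn		= example_fn,
		.thread_comm		= "example/%u",
		.create			= example_create,	/* runs at creation time */
		.selfparking		= true,	/* skipped by the park machinery */
	};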

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/smpboot.h |    5 +++++
 kernel/smpboot.c        |    5 +++--
 2 files changed, 8 insertions(+), 2 deletions(-)

Index: linux-2.6/include/linux/smpboot.h
===================================================================
--- linux-2.6.orig/include/linux/smpboot.h
+++ linux-2.6/include/linux/smpboot.h
@@ -14,6 +14,8 @@ struct smpboot_thread_data;
  * @thread_should_run:	Check whether the thread should run or not. Called with
  *			preemption disabled.
  * @thread_fn:		The associated thread function
+ * @create:		Optional setup function, called when the thread gets
+ *			created (Not called from the thread context)
  * @setup:		Optional setup function, called when the thread gets
  *			operational the first time
  * @cleanup:		Optional cleanup function, called when the thread
@@ -22,6 +24,7 @@ struct smpboot_thread_data;
  *			parked (cpu offline)
  * @unpark:		Optional unpark function, called when the thread is
  *			unparked (cpu online)
+ * @selfparking:	Thread is not parked by the park function.
  * @thread_comm:	The base name of the thread
  */
 struct smp_hotplug_thread {
@@ -29,10 +32,12 @@ struct smp_hotplug_thread {
 	struct list_head		list;
 	int				(*thread_should_run)(unsigned int cpu);
 	void				(*thread_fn)(unsigned int cpu);
+	void				(*create)(unsigned int cpu);
 	void				(*setup)(unsigned int cpu);
 	void				(*cleanup)(unsigned int cpu, bool online);
 	void				(*park)(unsigned int cpu);
 	void				(*unpark)(unsigned int cpu);
+	bool				selfparking;
 	const char			*thread_comm;
 };
 
Index: linux-2.6/kernel/smpboot.c
===================================================================
--- linux-2.6.orig/kernel/smpboot.c
+++ linux-2.6/kernel/smpboot.c
@@ -183,9 +183,10 @@ __smpboot_create_thread(struct smp_hotpl
 		kfree(td);
 		return PTR_ERR(tsk);
 	}
-
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
+	if (ht->create)
+		ht->create(cpu);
 	return 0;
 }
 
@@ -225,7 +226,7 @@ static void smpboot_park_thread(struct s
 {
 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
-	if (tsk)
+	if (tsk && !ht->selfparking)
 		kthread_park(tsk);
 }
 




* [patch 02/40] stop_machine: Store task reference in a separate per cpu variable
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
  2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:33   ` Paul E. McKenney
  2013-02-14 17:47   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  2013-01-31 12:11 ` [patch 03/40] stop_machine: Use smpboot threads Thomas Gleixner
                   ` (39 subsequent siblings)
  41 siblings, 2 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: stop-machine-store-thread-separate.patch --]
[-- Type: text/plain, Size: 4222 bytes --]

To allow the stopper thread to be managed by the smpboot thread
infrastructure, separate the task storage out of the stopper data
structure.
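
The task reference then lives in its own per cpu variable and is
looked up independently of struct cpu_stopper, following the usual
pattern:

	static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);

	/* look up the stopper task for a given cpu */
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);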

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/stop_machine.c |   32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

Index: linux-2.6/kernel/stop_machine.c
===================================================================
--- linux-2.6.orig/kernel/stop_machine.c
+++ linux-2.6/kernel/stop_machine.c
@@ -37,10 +37,10 @@ struct cpu_stopper {
 	spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
-	struct task_struct	*thread;	/* stopper thread */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct 
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(struct cpu_stopper *stopper,
-				struct cpu_stop_work *work)
+static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+
 	unsigned long flags;
 
 	spin_lock_irqsave(&stopper->lock, flags);
 
 	if (stopper->enabled) {
 		list_add_tail(&work->list, &stopper->works);
-		wake_up_process(stopper->thread);
+		wake_up_process(p);
 	} else
 		cpu_stop_signal_done(work->done, false);
 
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
 	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
 	cpu_stop_init_done(&done, 1);
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+	cpu_stop_queue_work(cpu, &work);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cp
 			struct cpu_stop_work *work_buf)
 {
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+	cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const s
 	 */
 	preempt_disable();
 	for_each_cpu(cpu, cpumask)
-		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
-				    &per_cpu(stop_cpus_work, cpu));
+		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
 	preempt_enable();
 }
 
@@ -304,12 +305,11 @@ static int __cpuinit cpu_stop_cpu_callba
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p;
+	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		BUG_ON(stopper->thread || stopper->enabled ||
-		       !list_empty(&stopper->works));
+		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
 		p = kthread_create_on_node(cpu_stopper_thread,
 					   stopper,
 					   cpu_to_node(cpu),
@@ -319,12 +319,12 @@ static int __cpuinit cpu_stop_cpu_callba
 		get_task_struct(p);
 		kthread_bind(p, cpu);
 		sched_set_stop_task(cpu, p);
-		stopper->thread = p;
+		per_cpu(cpu_stopper_task, cpu) = p;
 		break;
 
 	case CPU_ONLINE:
 		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(stopper->thread);
+		wake_up_process(p);
 		/* mark enabled */
 		spin_lock_irq(&stopper->lock);
 		stopper->enabled = true;
@@ -339,7 +339,7 @@ static int __cpuinit cpu_stop_cpu_callba
 
 		sched_set_stop_task(cpu, NULL);
 		/* kill the stopper */
-		kthread_stop(stopper->thread);
+		kthread_stop(p);
 		/* drain remaining works */
 		spin_lock_irq(&stopper->lock);
 		list_for_each_entry(work, &stopper->works, list)
@@ -347,8 +347,8 @@ static int __cpuinit cpu_stop_cpu_callba
 		stopper->enabled = false;
 		spin_unlock_irq(&stopper->lock);
 		/* release the stopper */
-		put_task_struct(stopper->thread);
-		stopper->thread = NULL;
+		put_task_struct(p);
+		per_cpu(cpu_stopper_task, cpu) = NULL;
 		break;
 	}
 #endif




* [patch 03/40] stop_machine: Use smpboot threads
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
  2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
  2013-01-31 12:11 ` [patch 02/40] stop_machine: Store task reference in a separate per cpu variable Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:39   ` Paul E. McKenney
  2013-02-14 17:49   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  2013-01-31 12:11 ` [patch 04/40] cpu: Restructure FROZEN state handling Thomas Gleixner
                   ` (38 subsequent siblings)
  41 siblings, 2 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: stop-machine-use-smpboot-threads.patch --]
[-- Type: text/plain, Size: 6075 bytes --]

Use the smpboot thread infrastructure. Mark the stopper thread
selfparking and park it after it has finished the take_cpu_down()
work.
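
In sketch form, the dying cpu then executes:

	/* in take_cpu_down(), running on the cpu which goes down */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park ourselves. This works only because the stopper thread is
	   marked selfparking and thus skipped by smpboot_park_threads(). */
	kthread_park(current);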

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c          |    2 
 kernel/stop_machine.c |  134 ++++++++++++++++++--------------------------------
 2 files changed, 51 insertions(+), 85 deletions(-)

Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -254,6 +254,8 @@ static int __ref take_cpu_down(void *_pa
 		return err;
 
 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Park the stopper thread */
+	kthread_park(current);
 	return 0;
 }
 
Index: linux-2.6/kernel/stop_machine.c
===================================================================
--- linux-2.6.orig/kernel/stop_machine.c
+++ linux-2.6/kernel/stop_machine.c
@@ -18,7 +18,7 @@
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
+#include <linux/smpboot.h>
 #include <linux/atomic.h>
 
 /*
@@ -245,20 +245,25 @@ int try_stop_cpus(const struct cpumask *
 	return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	unsigned long flags;
+	int run;
+
+	spin_lock_irqsave(&stopper->lock, flags);
+	run = !list_empty(&stopper->works);
+	spin_unlock_irqrestore(&stopper->lock, flags);
+	return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
 {
-	struct cpu_stopper *stopper = data;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
 	int ret;
 
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
-
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		return 0;
-	}
-
 	work = NULL;
 	spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
@@ -274,8 +279,6 @@ repeat:
 		struct cpu_stop_done *done = work->done;
 		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-		__set_current_state(TASK_RUNNING);
-
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
@@ -291,87 +294,55 @@ repeat:
 					  ksym_buf), arg);
 
 		cpu_stop_signal_done(done, true);
-	} else
-		schedule();
-
-	goto repeat;
+		goto repeat;
+	}
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-					   unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+	struct cpu_stop_work *work;
+	unsigned long flags;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
-		p = kthread_create_on_node(cpu_stopper_thread,
-					   stopper,
-					   cpu_to_node(cpu),
-					   "migration/%d", cpu);
-		if (IS_ERR(p))
-			return notifier_from_errno(PTR_ERR(p));
-		get_task_struct(p);
-		kthread_bind(p, cpu);
-		sched_set_stop_task(cpu, p);
-		per_cpu(cpu_stopper_task, cpu) = p;
-		break;
+	/* drain remaining works */
+	spin_lock_irqsave(&stopper->lock, flags);
+	list_for_each_entry(work, &stopper->works, list)
+		cpu_stop_signal_done(work->done, false);
+	stopper->enabled = false;
+	spin_unlock_irqrestore(&stopper->lock, flags);
+}
 
-	case CPU_ONLINE:
-		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(p);
-		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
-		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-	{
-		struct cpu_stop_work *work;
-
-		sched_set_stop_task(cpu, NULL);
-		/* kill the stopper */
-		kthread_stop(p);
-		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
-		list_for_each_entry(work, &stopper->works, list)
-			cpu_stop_signal_done(work->done, false);
-		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
-		/* release the stopper */
-		put_task_struct(p);
-		per_cpu(cpu_stopper_task, cpu) = NULL;
-		break;
-	}
-#endif
-	}
+static void cpu_stop_unpark(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-	return NOTIFY_OK;
+	spin_lock_irq(&stopper->lock);
+	stopper->enabled = true;
+	spin_unlock_irq(&stopper->lock);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers.  It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-	.notifier_call	= cpu_stop_cpu_callback,
-	.priority	= 10,
+static struct smp_hotplug_thread cpu_stop_threads = {
+	.store			= &cpu_stopper_task,
+	.thread_should_run	= cpu_stop_should_run,
+	.thread_fn		= cpu_stopper_thread,
+	.thread_comm		= "migration/%u",
+	.create			= cpu_stop_create,
+	.setup			= cpu_stop_unpark,
+	.park			= cpu_stop_park,
+	.unpark			= cpu_stop_unpark,
+	.selfparking		= true,
 };
 
 static int __init cpu_stop_init(void)
 {
-	void *bcpu = (void *)(long)smp_processor_id();
 	unsigned int cpu;
-	int err;
 
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
-	/* start one for the boot cpu */
-	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-				    bcpu);
-	BUG_ON(err != NOTIFY_OK);
-	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-	register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
 	stop_machine_initialized = true;
-
 	return 0;
 }
 early_initcall(cpu_stop_init);




* [patch 05/40] cpu: Restructure cpu_down code
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (3 preceding siblings ...)
  2013-01-31 12:11 ` [patch 04/40] cpu: Restructure FROZEN state handling Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:49   ` Paul E. McKenney
  2014-10-09 17:05   ` Borislav Petkov
  2013-01-31 12:11 ` [patch 06/40] cpu: hotplug: Split out cpu down functions Thomas Gleixner
                   ` (36 subsequent siblings)
  41 siblings, 2 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-split-out-cpu-up.patch --]
[-- Type: text/plain, Size: 2547 bytes --]

Split the cpu_up code out into separate functions, so we can convert
it to a state machine.
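
The up path then becomes a linear sequence of the new helpers
(simplified; see the diff below for the error handling):

	ret = smpboot_create_threads(cpu);
	if (!ret)
		ret = notify_prepare(cpu);
	if (!ret)
		ret = bringup_cpu(cpu);
	if (!ret) {
		smpboot_unpark_threads(cpu);
		notify_online(cpu);
	}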

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c |   69 ++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 22 deletions(-)

Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -168,6 +168,43 @@ static int cpu_notify(unsigned long val,
 	return __cpu_notify(val, cpu, -1, NULL);
 }
 
+/* Notifier wrappers for transitioning to state machine */
+static int notify_prepare(unsigned int cpu)
+{
+	int nr_calls = 0;
+	int ret;
+
+	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
+	if (ret) {
+		nr_calls--;
+		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
+				__func__, cpu);
+		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
+	}
+	return ret;
+}
+
+static int notify_online(unsigned int cpu)
+{
+	cpu_notify(CPU_ONLINE, cpu);
+	return 0;
+}
+
+static int bringup_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	int ret;
+
+	/* Arch-specific enabling code. */
+	ret = __cpu_up(cpu, idle);
+	if (ret) {
+		cpu_notify(CPU_UP_CANCELED, cpu);
+		return ret;
+	}
+	BUG_ON(!cpu_online(cpu));
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
@@ -340,7 +377,7 @@ EXPORT_SYMBOL(cpu_down);
 static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	struct task_struct *idle;
-	int ret, nr_calls = 0;
+	int ret;
 
 	cpu_hotplug_begin();
 
@@ -355,35 +392,23 @@ static int __cpuinit _cpu_up(unsigned in
 		goto out;
 	}
 
+	cpuhp_tasks_frozen = tasks_frozen;
+
 	ret = smpboot_create_threads(cpu);
 	if (ret)
 		goto out;
 
-	cpuhp_tasks_frozen = tasks_frozen;
-
-	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
-	if (ret) {
-		nr_calls--;
-		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
-				__func__, cpu);
-		goto out_notify;
-	}
+	ret = notify_prepare(cpu);
+	if (ret)
+		goto out;
 
-	/* Arch-specific enabling code. */
-	ret = __cpu_up(cpu, idle);
-	if (ret != 0)
-		goto out_notify;
-	BUG_ON(!cpu_online(cpu));
+	ret = bringup_cpu(cpu);
+	if (ret)
+		goto out;
 
 	/* Wake the per cpu threads */
 	smpboot_unpark_threads(cpu);
-
-	/* Now call notifier in preparation. */
-	cpu_notify(CPU_ONLINE, cpu);
-
-out_notify:
-	if (ret != 0)
-		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
+	notify_online(cpu);
 out:
 	cpu_hotplug_done();
 




* [patch 04/40] cpu: Restructure FROZEN state handling
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (2 preceding siblings ...)
  2013-01-31 12:11 ` [patch 03/40] stop_machine: Use smpboot threads Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:52   ` Paul E. McKenney
  2014-10-09 16:53   ` Borislav Petkov
  2013-01-31 12:11 ` [patch 05/40] cpu: Restructure cpu_down code Thomas Gleixner
                   ` (37 subsequent siblings)
  41 siblings, 2 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-restructure-code.patch --]
[-- Type: text/plain, Size: 5745 bytes --]

There are only a few callbacks which really care about FROZEN
vs. !FROZEN. No need to have extra states for this.

Publish the frozen state in an extra variable which is updated under
the hotplug lock and let the interested users deal with it, without
imposing extra state checks on everyone.
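
__cpu_notify() still ORs CPU_TASKS_FROZEN back into the event, so the
few callbacks which do care can keep the usual pattern. A sketch with
a made up notifier:

	static int example_cb(struct notifier_block *nb, unsigned long action,
			      void *hcpu)
	{
		bool frozen = action & CPU_TASKS_FROZEN;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			/* consult 'frozen' only where suspend/resume matters */
			break;
		}
		return NOTIFY_OK;
	}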

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c |   66 ++++++++++++++++++++++++-----------------------------------
 1 file changed, 27 insertions(+), 39 deletions(-)

Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -25,6 +25,7 @@
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
+static bool cpuhp_tasks_frozen;
 
 /*
  * The following two API's must be used when attempting
@@ -148,27 +149,30 @@ int __ref register_cpu_notifier(struct n
 	return ret;
 }
 
-static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
 			int *nr_calls)
 {
+	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	void *hcpu = (void *)(long)cpu;
+
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
 					nr_calls);
 
 	return notifier_to_errno(ret);
 }
 
-static int cpu_notify(unsigned long val, void *v)
+static int cpu_notify(unsigned long val, unsigned int cpu)
 {
-	return __cpu_notify(val, v, -1, NULL);
+	return __cpu_notify(val, cpu, -1, NULL);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void cpu_notify_nofail(unsigned long val, void *v)
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
 {
-	BUG_ON(cpu_notify(val, v));
+	BUG_ON(cpu_notify(val, cpu));
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
@@ -237,23 +241,17 @@ static inline void check_for_tasks(int c
 	write_unlock_irq(&tasklist_lock);
 }
 
-struct take_cpu_down_param {
-	unsigned long mod;
-	void *hcpu;
-};
-
 /* Take this CPU down. */
 static int __ref take_cpu_down(void *_param)
 {
-	struct take_cpu_down_param *param = _param;
-	int err;
+	int err, cpu = smp_processor_id();
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
 		return err;
 
-	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	cpu_notify(CPU_DYING, cpu);
 	/* Park the stopper thread */
 	kthread_park(current);
 	return 0;
@@ -263,12 +261,6 @@ static int __ref take_cpu_down(void *_pa
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
-	struct take_cpu_down_param tcd_param = {
-		.mod = mod,
-		.hcpu = hcpu,
-	};
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -278,21 +270,23 @@ static int __ref _cpu_down(unsigned int 
 
 	cpu_hotplug_begin();
 
-	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
 	if (err) {
 		nr_calls--;
-		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__func__, cpu);
 		goto out_release;
 	}
 	smpboot_park_threads(cpu);
 
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		smpboot_unpark_threads(cpu);
-		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -311,14 +305,14 @@ static int __ref _cpu_down(unsigned int 
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
+	cpu_notify_nofail(CPU_DEAD, cpu);
 
 	check_for_tasks(cpu);
 
 out_release:
 	cpu_hotplug_done();
 	if (!err)
-		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+		cpu_notify_nofail(CPU_POST_DEAD, cpu);
 	return err;
 }
 
@@ -345,10 +339,8 @@ EXPORT_SYMBOL(cpu_down);
 /* Requires cpu_add_remove_lock to be held */
 static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
-	int ret, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct task_struct *idle;
+	int ret, nr_calls = 0;
 
 	cpu_hotplug_begin();
 
@@ -367,7 +359,9 @@ static int __cpuinit _cpu_up(unsigned in
 	if (ret)
 		goto out;
 
-	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
 		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
@@ -385,11 +379,11 @@ static int __cpuinit _cpu_up(unsigned in
 	smpboot_unpark_threads(cpu);
 
 	/* Now call notifier in preparation. */
-	cpu_notify(CPU_ONLINE | mod, hcpu);
+	cpu_notify(CPU_ONLINE, cpu);
 
 out_notify:
 	if (ret != 0)
-		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
 out:
 	cpu_hotplug_done();
 
@@ -627,13 +621,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
  */
 void __cpuinit notify_cpu_starting(unsigned int cpu)
 {
-	unsigned long val = CPU_STARTING;
-
-#ifdef CONFIG_PM_SLEEP_SMP
-	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
-		val = CPU_STARTING_FROZEN;
-#endif /* CONFIG_PM_SLEEP_SMP */
-	cpu_notify(val, (void *)(long)cpu);
+	cpu_notify(CPU_STARTING, cpu);
 }
 
 #endif /* CONFIG_SMP */




* [patch 06/40] cpu: hotplug: Split out cpu down functions
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (4 preceding siblings ...)
  2013-01-31 12:11 ` [patch 05/40] cpu: Restructure cpu_down code Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-09  0:54   ` Paul E. McKenney
  2013-01-31 12:11 ` [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor Thomas Gleixner
                   ` (35 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-split-out-cpu-down.patch --]
[-- Type: text/plain, Size: 3166 bytes --]

Split cpu_down into separate functions in preparation for the state
machine conversion.
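
The down path then mirrors the up path as a sequence of helpers
(simplified; see the diff below for the details):

	err = notify_down_prepare(cpu);
	if (!err)
		err = takedown_cpu(cpu);
	if (!err)
		notify_dead(cpu);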

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c |   83 +++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 53 insertions(+), 30 deletions(-)

Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -206,11 +206,6 @@ static int bringup_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-	BUG_ON(cpu_notify(val, cpu));
-}
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
@@ -278,6 +273,25 @@ static inline void check_for_tasks(int c
 	write_unlock_irq(&tasklist_lock);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+	BUG_ON(cpu_notify(val, cpu));
+}
+
+static int notify_down_prepare(unsigned int cpu)
+{
+	int err, nr_calls = 0;
+
+	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
+	if (err) {
+		nr_calls--;
+		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
+		printk("%s: attempt to take down CPU %u failed\n",
+				__func__, cpu);
+	}
+	return err;
+}
+
 /* Take this CPU down. */
 static int __ref take_cpu_down(void *_param)
 {
@@ -294,37 +308,17 @@ static int __ref take_cpu_down(void *_pa
 	return 0;
 }
 
-/* Requires cpu_add_remove_lock to be held */
-static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+static int takedown_cpu(unsigned int cpu)
 {
-	int err, nr_calls = 0;
-
-	if (num_online_cpus() == 1)
-		return -EBUSY;
-
-	if (!cpu_online(cpu))
-		return -EINVAL;
-
-	cpu_hotplug_begin();
-
-	cpuhp_tasks_frozen = tasks_frozen;
+	int err;
 
-	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
-	if (err) {
-		nr_calls--;
-		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
-		printk("%s: attempt to take down CPU %u failed\n",
-				__func__, cpu);
-		goto out_release;
-	}
 	smpboot_park_threads(cpu);
-
 	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		smpboot_unpark_threads(cpu);
 		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
-		goto out_release;
+		return err;
 	}
 	BUG_ON(cpu_online(cpu));
 
@@ -341,10 +335,39 @@ static int __ref _cpu_down(unsigned int 
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
-	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	cpu_notify_nofail(CPU_DEAD, cpu);
+	return 0;
+}
 
+static int notify_dead(unsigned int cpu)
+{
+	cpu_notify_nofail(CPU_DEAD, cpu);
 	check_for_tasks(cpu);
+	return 0;
+}
+
+/* Requires cpu_add_remove_lock to be held */
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+{
+	int err;
+
+	if (num_online_cpus() == 1)
+		return -EBUSY;
+
+	if (!cpu_online(cpu))
+		return -EINVAL;
+
+	cpu_hotplug_begin();
+
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	err = notify_down_prepare(cpu);
+	if (err)
+		goto out_release;
+	err = takedown_cpu(cpu);
+	if (err)
+		goto out_release;
+
+	notify_dead(cpu);
 
 out_release:
 	cpu_hotplug_done();




* [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (5 preceding siblings ...)
  2013-01-31 12:11 ` [patch 06/40] cpu: hotplug: Split out cpu down functions Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-11 20:09   ` Paul E. McKenney
  2013-01-31 12:11 ` [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine Thomas Gleixner
                   ` (34 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-implement-state-machine.patch --]
[-- Type: text/plain, Size: 10292 bytes --]

Move the split-out steps into a callback array and let the cpu_up/down
code iterate through the array of functions. For now, most of the
callbacks are asymmetric to resemble the current hotplug maze.
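
The bringup side then reduces to walking the array from the current
state of the cpu upwards (simplified; the rollback and state storage
details are in the diff below):

	for (step = per_cpu(cpuhp_state, cpu); step < CPUHP_MAX; step++) {
		if (!cpuhp_bp_states[step].startup)
			continue;
		ret = cpuhp_bp_states[step].startup(cpu);
		if (ret) {
			undo_cpu_up(cpu, step - 1);
			break;
		}
	}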

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    4 +
 include/linux/cpuhotplug.h |   16 ++++
 init/main.c                |   15 ---
 kernel/cpu.c               |  180 ++++++++++++++++++++++++++++++++++++---------
 kernel/smpboot.c           |    6 +
 kernel/smpboot.h           |    4 -
 6 files changed, 173 insertions(+), 52 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -26,6 +26,9 @@ struct cpu {
 	struct device dev;
 };
 
+extern void boot_cpu_init(void);
+extern void boot_cpu_state_init(void);
+
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct device *get_cpu_device(unsigned cpu);
 extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -112,6 +115,7 @@ enum {
 
 
 #ifdef CONFIG_SMP
+extern bool cpuhp_tasks_frozen;
 /* Need to know about CPUs going up/down? */
 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
 #define cpu_notifier(fn, pri) {					\
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- /dev/null
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -0,0 +1,16 @@
+#ifndef __CPUHOTPLUG_H
+#define __CPUHOTPLUG_H
+
+enum cpuhp_states {
+	CPUHP_OFFLINE,
+	CPUHP_CREATE_THREADS,
+	CPUHP_NOTIFY_PREPARE,
+	CPUHP_NOTIFY_DEAD,
+	CPUHP_BRINGUP_CPU,
+	CPUHP_TEARDOWN_CPU,
+	CPUHP_PERCPU_THREADS,
+	CPUHP_NOTIFY_ONLINE,
+	CPUHP_NOTIFY_DOWN_PREPARE,
+	CPUHP_MAX,
+};
+#endif
Index: linux-2.6/init/main.c
===================================================================
--- linux-2.6.orig/init/main.c
+++ linux-2.6/init/main.c
@@ -424,20 +424,6 @@ void __init parse_early_param(void)
 	done = 1;
 }
 
-/*
- *	Activate the first processor.
- */
-
-static void __init boot_cpu_init(void)
-{
-	int cpu = smp_processor_id();
-	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
-	set_cpu_online(cpu, true);
-	set_cpu_active(cpu, true);
-	set_cpu_present(cpu, true);
-	set_cpu_possible(cpu, true);
-}
-
 void __init __weak smp_setup_processor_id(void)
 {
 }
@@ -502,6 +488,7 @@ asmlinkage void __init start_kernel(void
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
+	boot_cpu_state_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	build_all_zonelists(NULL, NULL);
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -19,13 +19,24 @@
 #include <linux/mutex.h>
 #include <linux/gfp.h>
 #include <linux/suspend.h>
+#include <linux/cpuhotplug.h>
 
 #include "smpboot.h"
 
+/* CPU state */
+static DEFINE_PER_CPU(enum cpuhp_states, cpuhp_state);
+
+struct cpuhp_step {
+	int (*startup)(unsigned int cpu);
+	int (*teardown)(unsigned int cpu);
+};
+
+static struct cpuhp_step cpuhp_bp_states[];
+
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
-static bool cpuhp_tasks_frozen;
+bool cpuhp_tasks_frozen;
 
 /*
  * The following two API's must be used when attempting
@@ -310,13 +321,10 @@ static int __ref take_cpu_down(void *_pa
 
 static int takedown_cpu(unsigned int cpu)
 {
-	int err;
+	int err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 
-	smpboot_park_threads(cpu);
-	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		smpboot_unpark_threads(cpu);
 		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
 		return err;
 	}
@@ -345,10 +353,32 @@ static int notify_dead(unsigned int cpu)
 	return 0;
 }
 
+#else
+#define notify_down_prepare	NULL
+#define takedown_cpu		NULL
+#define notify_dead		NULL
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void undo_cpu_down(unsigned int cpu, int step)
+{
+	while (step++ < CPUHP_MAX) {
+		/*
+		 * Transitional check. Will be removed when we have a
+		 * fully symmetric mechanism
+		 */
+		if (!cpuhp_bp_states[step].teardown)
+			continue;
+
+		if (cpuhp_bp_states[step].startup)
+			cpuhp_bp_states[step].startup(cpu);
+	}
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
-	int err;
+	int ret = 0, step;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -360,20 +390,23 @@ static int __ref _cpu_down(unsigned int 
 
 	cpuhp_tasks_frozen = tasks_frozen;
 
-	err = notify_down_prepare(cpu);
-	if (err)
-		goto out_release;
-	err = takedown_cpu(cpu);
-	if (err)
-		goto out_release;
-
-	notify_dead(cpu);
+	for (step = per_cpu(cpuhp_state, cpu); step > 0; step--) {
+		if (cpuhp_bp_states[step].teardown) {
+			ret = cpuhp_bp_states[step].teardown(cpu);
+			if (ret) {
+				undo_cpu_down(cpu, step + 1);
+				step = CPUHP_MAX;
+				break;
+			}
+		}
+	}
+	/* Store the current cpu state */
+	per_cpu(cpuhp_state, cpu) = step;
 
-out_release:
 	cpu_hotplug_done();
-	if (!err)
+	if (!ret)
 		cpu_notify_nofail(CPU_POST_DEAD, cpu);
-	return err;
+	return ret;
 }
 
 int __ref cpu_down(unsigned int cpu)
@@ -396,11 +429,25 @@ out:
 EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
+static void undo_cpu_up(unsigned int cpu, int step)
+{
+	while (step--) {
+		/*
+		 * Transitional check. Will be removed when we have a
+		 * fully symmetric mechanism
+		 */
+		if (!cpuhp_bp_states[step].startup)
+			continue;
+		if (cpuhp_bp_states[step].teardown)
+			cpuhp_bp_states[step].teardown(cpu);
+	}
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
+	int ret = 0, step;
 	struct task_struct *idle;
-	int ret;
 
 	cpu_hotplug_begin();
 
@@ -409,6 +456,7 @@ static int __cpuinit _cpu_up(unsigned in
 		goto out;
 	}
 
+	/* Let it fail before we try to bring the cpu up */
 	idle = idle_thread_get(cpu);
 	if (IS_ERR(idle)) {
 		ret = PTR_ERR(idle);
@@ -417,24 +465,20 @@ static int __cpuinit _cpu_up(unsigned in
 
 	cpuhp_tasks_frozen = tasks_frozen;
 
-	ret = smpboot_create_threads(cpu);
-	if (ret)
-		goto out;
-
-	ret = notify_prepare(cpu);
-	if (ret)
-		goto out;
-
-	ret = bringup_cpu(cpu);
-	if (ret)
-		goto out;
-
-	/* Wake the per cpu threads */
-	smpboot_unpark_threads(cpu);
-	notify_online(cpu);
+	for (step = per_cpu(cpuhp_state, cpu); step < CPUHP_MAX; step++) {
+		if (cpuhp_bp_states[step].startup) {
+			ret = cpuhp_bp_states[step].startup(cpu);
+			if (ret) {
+				undo_cpu_up(cpu, step - 1);
+				step = 0;
+				break;
+			}
+		}
+	}
+	/* Store the current cpu state */
+	per_cpu(cpuhp_state, cpu) = step;
 out:
 	cpu_hotplug_done();
-
 	return ret;
 }
 
@@ -674,6 +718,52 @@ void __cpuinit notify_cpu_starting(unsig
 
 #endif /* CONFIG_SMP */
 
+/* Boot processor state steps */
+static struct cpuhp_step cpuhp_bp_states[] = {
+	[CPUHP_OFFLINE] = {
+		.startup = NULL,
+		.teardown = NULL,
+	},
+#ifdef CONFIG_SMP
+	[CPUHP_CREATE_THREADS] = {
+		.startup = smpboot_create_threads,
+		.teardown = NULL,
+	},
+	[CPUHP_NOTIFY_PREPARE] = {
+		.startup = notify_prepare,
+		.teardown = NULL,
+	},
+	[CPUHP_NOTIFY_DEAD] = {
+		.startup = NULL,
+		.teardown = notify_dead,
+	},
+	[CPUHP_BRINGUP_CPU] = {
+		.startup = bringup_cpu,
+		.teardown = NULL,
+	},
+	[CPUHP_TEARDOWN_CPU] = {
+		.startup = NULL,
+		.teardown = takedown_cpu,
+	},
+	[CPUHP_PERCPU_THREADS] = {
+		.startup = smpboot_unpark_threads,
+		.teardown = smpboot_park_threads,
+	},
+	[CPUHP_NOTIFY_ONLINE] = {
+		.startup = notify_online,
+		.teardown = NULL,
+	},
+	[CPUHP_NOTIFY_DOWN_PREPARE] = {
+		.startup = NULL,
+		.teardown = notify_down_prepare,
+	},
+#endif
+	[CPUHP_MAX] = {
+		.startup = NULL,
+		.teardown = NULL,
+	},
+};
+
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
@@ -769,3 +859,25 @@ void init_cpu_online(const struct cpumas
 {
 	cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
+
+/*
+ * Activate the first processor.
+ */
+void __init boot_cpu_init(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
+	set_cpu_online(cpu, true);
+	set_cpu_active(cpu, true);
+	set_cpu_present(cpu, true);
+	set_cpu_possible(cpu, true);
+}
+
+/*
+ * Must be called _AFTER_ setting up the per_cpu areas
+ */
+void __init boot_cpu_state_init(void)
+{
+	per_cpu(cpuhp_state, smp_processor_id()) = CPUHP_MAX;
+}
Index: linux-2.6/kernel/smpboot.c
===================================================================
--- linux-2.6.orig/kernel/smpboot.c
+++ linux-2.6/kernel/smpboot.c
@@ -212,7 +212,7 @@ static void smpboot_unpark_thread(struct
 	kthread_unpark(tsk);
 }
 
-void smpboot_unpark_threads(unsigned int cpu)
+int smpboot_unpark_threads(unsigned int cpu)
 {
 	struct smp_hotplug_thread *cur;
 
@@ -220,6 +220,7 @@ void smpboot_unpark_threads(unsigned int
 	list_for_each_entry(cur, &hotplug_threads, list)
 		smpboot_unpark_thread(cur, cpu);
 	mutex_unlock(&smpboot_threads_lock);
+	return 0;
 }
 
 static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
@@ -230,7 +231,7 @@ static void smpboot_park_thread(struct s
 		kthread_park(tsk);
 }
 
-void smpboot_park_threads(unsigned int cpu)
+int smpboot_park_threads(unsigned int cpu)
 {
 	struct smp_hotplug_thread *cur;
 
@@ -238,6 +239,7 @@ void smpboot_park_threads(unsigned int c
 	list_for_each_entry_reverse(cur, &hotplug_threads, list)
 		smpboot_park_thread(cur, cpu);
 	mutex_unlock(&smpboot_threads_lock);
+	return 0;
 }
 
 static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
Index: linux-2.6/kernel/smpboot.h
===================================================================
--- linux-2.6.orig/kernel/smpboot.h
+++ linux-2.6/kernel/smpboot.h
@@ -14,7 +14,7 @@ static inline void idle_threads_init(voi
 #endif
 
 int smpboot_create_threads(unsigned int cpu);
-void smpboot_park_threads(unsigned int cpu);
-void smpboot_unpark_threads(unsigned int cpu);
+int smpboot_park_threads(unsigned int cpu);
+int smpboot_unpark_threads(unsigned int cpu);
 
 #endif




* [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (6 preceding siblings ...)
  2013-01-31 12:11 ` [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-11 20:17   ` Paul E. McKenney
  2013-01-31 12:11 ` [patch 10/40] sched: Convert to state machine callbacks Thomas Gleixner
                   ` (33 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-implement-starting-dying-statemachine.patch --]
[-- Type: text/plain, Size: 4417 bytes --]

Move the functions which need to run on the hotplugged processor into
a state machine array and let the code iterate through these functions.

In a later stage, this will grow synchronization points between the
control processor and the hotplugged processor, so we can move the
various architecture implementations of the synchronization into the
core.
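
For reference, the arch bringup code on the new cpu then ends up doing
roughly the following (a sketch; the exact sequence is architecture
specific):

	notify_cpu_starting(cpu);	/* runs the cpuhp_ap_states startup steps */
	set_cpu_online(cpu, true);
	local_irq_enable();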

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    4 ++
 kernel/cpu.c               |   70 +++++++++++++++++++++++++++++++++++----------
 2 files changed, 59 insertions(+), 15 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -7,6 +7,10 @@ enum cpuhp_states {
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_BRINGUP_CPU,
+	CPUHP_AP_OFFLINE,
+	CPUHP_AP_NOTIFY_STARTING,
+	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_MAX,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_PERCPU_THREADS,
 	CPUHP_NOTIFY_ONLINE,
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -32,6 +32,7 @@ struct cpuhp_step {
 };
 
 static struct cpuhp_step cpuhp_bp_states[];
+static struct cpuhp_step cpuhp_ap_states[];
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -216,6 +217,12 @@ static int bringup_cpu(unsigned int cpu)
 	return 0;
 }
 
+static int notify_starting(unsigned int cpu)
+{
+	cpu_notify(CPU_STARTING, cpu);
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 EXPORT_SYMBOL(register_cpu_notifier);
 
@@ -303,17 +310,26 @@ static int notify_down_prepare(unsigned 
 	return err;
 }
 
+static int notify_dying(unsigned int cpu)
+{
+	cpu_notify(CPU_DYING, cpu);
+	return 0;
+}
+
 /* Take this CPU down. */
 static int __ref take_cpu_down(void *_param)
 {
-	int err, cpu = smp_processor_id();
+	int step, err, cpu = smp_processor_id();
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
 		return err;
 
-	cpu_notify(CPU_DYING, cpu);
+	for (step = CPUHP_AP_MAX; step >= CPUHP_AP_OFFLINE; step--) {
+		if (cpuhp_ap_states[step].teardown)
+			cpuhp_ap_states[step].teardown(cpu);
+	}
 	/* Park the stopper thread */
 	kthread_park(current);
 	return 0;
@@ -357,6 +373,7 @@ static int notify_dead(unsigned int cpu)
 #define notify_down_prepare	NULL
 #define takedown_cpu		NULL
 #define notify_dead		NULL
+#define notify_dying		NULL
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -429,6 +446,24 @@ out:
 EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
+/**
+ * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * @cpu: cpu that just started
+ *
+ * This function calls the cpu_chain notifiers with CPU_STARTING.
+ * It must be called by the arch code on the new cpu, before the new cpu
+ * enables interrupts and before the "boot" cpu returns from __cpu_up().
+ */
+void notify_cpu_starting(unsigned int cpu)
+{
+	int step;
+
+	for (step = CPUHP_AP_OFFLINE; step < CPUHP_AP_MAX; step++) {
+		if (cpuhp_ap_states[step].startup)
+			cpuhp_ap_states[step].startup(cpu);
+	}
+}
+
 static void undo_cpu_up(unsigned int cpu, int step)
 {
 	while (step--) {
@@ -703,19 +738,6 @@ core_initcall(cpu_hotplug_pm_sync_init);
 
 #endif /* CONFIG_PM_SLEEP_SMP */
 
-/**
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
- * @cpu: cpu that just started
- *
- * This function calls the cpu_chain notifiers with CPU_STARTING.
- * It must be called by the arch code on the new cpu, before the new cpu
- * enables interrupts and before the "boot" cpu returns from __cpu_up().
- */
-void __cpuinit notify_cpu_starting(unsigned int cpu)
-{
-	cpu_notify(CPU_STARTING, cpu);
-}
-
 #endif /* CONFIG_SMP */
 
 /* Boot processor state steps */
@@ -764,6 +786,24 @@ static struct cpuhp_step cpuhp_bp_states
 	},
 };
 
+/* Application processor state steps */
+static struct cpuhp_step cpuhp_ap_states[] = {
+#ifdef CONFIG_SMP
+	[CPUHP_AP_NOTIFY_STARTING] = {
+		.startup = notify_starting,
+		.teardown = NULL,
+	},
+	[CPUHP_AP_NOTIFY_DYING] = {
+		.startup = NULL,
+		.teardown = notify_dying,
+	},
+#endif
+	[CPUHP_MAX] = {
+		.startup = NULL,
+		.teardown = NULL,
+	},
+};
+
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.




* [patch 09/40] cpu: hotplug: Implement setup/removal interface
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (8 preceding siblings ...)
  2013-01-31 12:11 ` [patch 10/40] sched: Convert to state machine callbacks Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-01 13:44   ` Hillf Danton
  2013-01-31 12:11 ` [patch 11/40] x86: uncore: Move teardown callback to CPU_DEAD Thomas Gleixner
                   ` (31 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-implement-registration-interface.patch --]
[-- Type: text/plain, Size: 8725 bytes --]

Implement functions which allow the setup and removal of hotplug state
callbacks.

The default behaviour for setup is to call the startup function for
this state for (or on) all cpus which have a hotplug state >= the
installed state.

The default behaviour for removal is to call the teardown function for
this state for (or on) all cpus which have a hotplug state >= the
installed state.

For both setup and removal, helper functions are provided which
prevent the core from issuing the callbacks. This simplifies the
conversion of existing hotplug notifiers.
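
A conversion then boils down to something like this (a sketch with a
hypothetical state and callbacks; CPUHP_EXAMPLE_PREPARE would be a new
entry in enum cpuhp_states):

	static int example_prepare(unsigned int cpu)
	{
		/* allocate per cpu resources, may fail */
		return 0;
	}

	static int example_dead(unsigned int cpu)
	{
		/* free per cpu resources, must not fail */
		return 0;
	}

	ret = cpuhp_setup_state(CPUHP_EXAMPLE_PREPARE, example_prepare,
				example_dead);
	...
	cpuhp_remove_state(CPUHP_EXAMPLE_PREPARE);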

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    1 
 include/linux/cpuhotplug.h |   70 +++++++++++++++++
 kernel/cpu.c               |  185 ++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 255 insertions(+), 1 deletion(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -17,6 +17,7 @@
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
+#include <linux/cpuhotplug.h>
 
 struct device;
 
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -17,4 +17,74 @@ enum cpuhp_states {
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_MAX,
 };
+
+int __cpuhp_setup_state(enum cpuhp_states state, bool invoke,
+			int (*startup)(unsigned int cpu),
+			int (*teardown)(unsigned int cpu));
+
+/**
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * @state:	The state for which the calls are installed
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * Installs the callback functions and invokes the startup callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline int
+cpuhp_setup_state(enum cpuhp_states state, int (*startup)(unsigned int cpu),
+		  int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state(state, true, startup, teardown);
+}
+
+/**
+ * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the callbacks
+ * @state:	The state for which the calls are installed
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * No calls are executed. NOP if SMP=n or HOTPLUG_CPU=n
+ */
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
+static inline int
+cpuhp_setup_state_nocalls(enum cpuhp_states state,
+			 int (*startup)(unsigned int cpu),
+			 int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state(state, false, startup, teardown);
+}
+#else
+static inline int
+cpuhp_setup_state_nocalls(enum cpuhp_states state,
+			 int (*startup)(unsigned int cpu),
+			 int (*teardown)(unsigned int cpu))
+{
+	return 0;
+}
+#endif
+
+void __cpuhp_remove_state(enum cpuhp_states state, bool invoke);
+
+/**
+ * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
+ * @state:	The state for which the calls are removed
+ *
+ * Removes the callback functions and invokes the teardown callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline void cpuhp_remove_state(enum cpuhp_states state)
+{
+	__cpuhp_remove_state(state, true);
+}
+
+/**
+ * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking teardown
+ * @state:	The state for which the calls are removed
+ */
+static inline void cpuhp_remove_state_nocalls(enum cpuhp_states state)
+{
+	__cpuhp_remove_state(state, false);
+}
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -19,7 +19,6 @@
 #include <linux/mutex.h>
 #include <linux/gfp.h>
 #include <linux/suspend.h>
-#include <linux/cpuhotplug.h>
 
 #include "smpboot.h"
 
@@ -804,6 +803,190 @@ static struct cpuhp_step cpuhp_ap_states
 	},
 };
 
+/* Sanity check for callbacks */
+static int cpuhp_cb_check(enum cpuhp_states state)
+{
+	if (state <= CPUHP_OFFLINE || state >= CPUHP_MAX)
+		return -EINVAL;
+	return 0;
+}
+
+static bool cpuhp_is_ap_state(enum cpuhp_states state)
+{
+	return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_MAX);
+}
+
+static void cpuhp_store_callbacks(enum cpuhp_states state,
+				  int (*startup)(unsigned int cpu),
+				  int (*teardown)(unsigned int cpu))
+{
+	/* (Un)Install the callbacks for further cpu hotplug operations */
+	struct cpuhp_step *sp;
+
+	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
+	sp[state].startup = startup;
+	sp[state].teardown = teardown;
+}
+
+static void *cpuhp_get_teardown_cb(enum cpuhp_states state)
+{
+	/* (Un)Install the callbacks for further cpu hotplug operations */
+	struct cpuhp_step *sp;
+
+	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
+	return sp[state].teardown;
+}
+
+/* Helper function to run callback on the target cpu */
+static void cpuhp_on_cpu_cb(void *__cb)
+{
+	int (*cb)(unsigned int cpu) = __cb;
+
+	BUG_ON(cb(smp_processor_id()));
+}
+
+/*
+ * Call the startup/teardown function for a step either on the AP or
+ * on the current CPU.
+ */
+static int cpuhp_issue_call(int cpu, enum cpuhp_states state,
+			    int (*cb)(unsigned int), bool bringup)
+{
+	int ret;
+
+	if (!cb)
+		return 0;
+
+	if (cpuhp_is_ap_state(state)) {
+		/*
+		 * Note, that a function called on the AP is not
+		 * allowed to fail.
+		 */
+		smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
+		return 0;
+	}
+
+	/*
+	 * The non AP bound callbacks can fail on bringup. On teardown
+	 * e.g. module removal we crash for now.
+	 */
+	ret = cb(cpu);
+	BUG_ON(ret && !bringup);
+	return ret;
+}
+
+/*
+ * Called from __cpuhp_setup_state on a recoverable failure.
+ *
+ * Note: The teardown callbacks for rollback are not allowed to fail!
+ */
+static void cpuhp_rollback_install(int failedcpu, enum cpuhp_states state,
+				   int (*teardown)(unsigned int cpu))
+{
+	int cpu;
+
+	if (!teardown)
+		return;
+
+	/* Roll back the already executed steps on the other cpus */
+	for_each_present_cpu(cpu) {
+		int cpustate = per_cpu(cpuhp_state, cpu);
+
+		if (cpu >= failedcpu)
+			break;
+
+		/* Did we invoke the startup call on that cpu ? */
+		if (cpustate >= state)
+			cpuhp_issue_call(cpu, state, teardown, false);
+	}
+}
+
+/**
+ * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
+ * @state:	The state to setup
+ * @invoke:	If true, the startup function is invoked for cpus where
+ *		cpu state >= @state
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * Returns 0 if successful, otherwise a proper error code
+ */
+int __cpuhp_setup_state(enum cpuhp_states state, bool invoke,
+			int (*startup)(unsigned int cpu),
+			int (*teardown)(unsigned int cpu))
+{
+	int cpu, ret = 0;
+
+	if (cpuhp_cb_check(state))
+		return -EINVAL;
+
+	get_online_cpus();
+
+	if (!invoke || !startup)
+		goto install;
+
+	/*
+	 * Try to call the startup callback for each present cpu
+	 * depending on the hotplug state of the cpu.
+	 */
+	for_each_present_cpu(cpu) {
+		int ret, cpustate = per_cpu(cpuhp_state, cpu);
+
+		if (cpustate < state)
+			continue;
+
+		ret = cpuhp_issue_call(cpu, state, startup, true);
+		if (ret) {
+			cpuhp_rollback_install(cpu, state, teardown);
+			goto out;
+		}
+	}
+install:
+	cpuhp_store_callbacks(state, startup, teardown);
+out:
+	put_online_cpus();
+	return ret;
+}
+EXPORT_SYMBOL(__cpuhp_setup_state);
+
+/**
+ * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
+ * @state:	The state to remove
+ * @invoke:	If true, the teardown function is invoked for cpus where
+ *		cpu state >= @state
+ *
+ * The teardown callback is currently not allowed to fail. Think
+ * about module removal!
+ */
+void __cpuhp_remove_state(enum cpuhp_states state, bool invoke)
+{
+	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
+	int cpu;
+
+	BUG_ON(cpuhp_cb_check(state));
+
+	get_online_cpus();
+
+	if (!invoke || !teardown)
+		goto remove;
+
+	/*
+	 * Call the teardown callback for each present cpu depending
+	 * on the hotplug state of the cpu. This function is not
+	 * allowed to fail currently!
+	 */
+	for_each_present_cpu(cpu) {
+		int cpustate = per_cpu(cpuhp_state, cpu);
+
+		if (cpustate >= state)
+			cpuhp_issue_call(cpu, state, teardown, false);
+	}
+remove:
+	cpuhp_store_callbacks(state, NULL, NULL);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(__cpuhp_remove_state);
+
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.




* [patch 10/40] sched: Convert to state machine callbacks
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (7 preceding siblings ...)
  2013-01-31 12:11 ` [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-11 23:46   ` Paul E. McKenney
  2013-01-31 12:11 ` [patch 09/40] cpu: hotplug: Implement setup/removal interface Thomas Gleixner
                   ` (32 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-sched-convert-active-inactive.patch --]
[-- Type: text/plain, Size: 9305 bytes --]

The scheduler sports quite a bunch of hotplug notifiers. One reason
for the multiple notifiers is the fact that the startup and teardown
processes are asymmetric: the scheduler wants to be called early on
startup and late on teardown. That requires installing two different
notifiers for the same issue.

With the state machine implementation we can register a callback pair
for startup and teardown at the appropriate spot.

This patch converts the notifiers which are set up with special
priorities and combines the CPU_PRI_SCHED_* and CPU_PRI_CPUSET_*
notifiers into a single callback. They run back to back anyway, and we
can make sure in the callbacks that the ordering inside the scheduler
is correct. The callbacks are installed in sched_init_smp() as we
can't run them during the bringup of the non-boot cpus; the smp
scheduler is set up only after that. It would be nice if we could just
compile them in, but that needs larger surgery on the scheduler code
and is beyond the scope of this patch.
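
As a sketch of the general conversion pattern (hypothetical foo_*
names; the concrete scheduler variant is in the patch below), a
demultiplexing notifier like

	static int foo_cpu_notifier(struct notifier_block *nb,
				    unsigned long action, void *hcpu)
	{
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			foo_cpu_up((long)hcpu);
			return NOTIFY_OK;
		case CPU_DOWN_PREPARE:
			foo_cpu_down((long)hcpu);
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

becomes a plain startup/teardown pair registered at a fixed spot in
the state enum:

	static int foo_online_cpu(unsigned int cpu)
	{
		foo_cpu_up(cpu);
		return 0;
	}

	static int foo_offline_cpu(unsigned int cpu)
	{
		foo_cpu_down(cpu);
		return 0;
	}

	static int __init foo_init(void)
	{
		return cpuhp_setup_state(CPUHP_FOO_ONLINE, foo_online_cpu,
					 foo_offline_cpu);
	}

The action demultiplexing and the notifier priorities go away; the
ordering is defined by the position of the state in enum cpuhp_states.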

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |   16 ----
 include/linux/cpuhotplug.h |    6 +
 kernel/cpu.c               |    4 +
 kernel/sched/core.c        |  154 +++++++++++++++++----------------------------
 4 files changed, 69 insertions(+), 111 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -58,22 +58,6 @@ extern ssize_t arch_print_cpu_modalias(s
  * CPU notifier priorities.
  */
 enum {
-	/*
-	 * SCHED_ACTIVE marks a cpu which is coming up active during
-	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
-	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
-	 * cpu_active mask right after SCHED_ACTIVE.  During
-	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
-	 * ordered in the similar way.
-	 *
-	 * This ordering guarantees consistent cpu_active mask and
-	 * migration behavior to all cpu notifiers.
-	 */
-	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
-	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
-	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
-	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
-
 	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -6,13 +6,16 @@ enum cpuhp_states {
 	CPUHP_CREATE_THREADS,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
+	CPUHP_SCHED_DEAD,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_OFFLINE,
+	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_MAX,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_PERCPU_THREADS,
+	CPUHP_SCHED_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_MAX,
@@ -87,4 +90,7 @@ static inline void cpuhp_remove_state_no
 	__cpuhp_remove_state(state, false);
 }
 
+/* Compiled in scheduler hotplug functions */
+int sched_starting_cpu(unsigned int cpu);
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -788,6 +788,10 @@ static struct cpuhp_step cpuhp_bp_states
 /* Application processor state steps */
 static struct cpuhp_step cpuhp_ap_states[] = {
 #ifdef CONFIG_SMP
+	[CPUHP_AP_SCHED_STARTING] = {
+		.startup = sched_starting_cpu,
+		.teardown = NULL,
+	},
 	[CPUHP_AP_NOTIFY_STARTING] = {
 		.startup = notify_starting,
 		.teardown = NULL,
Index: linux-2.6/kernel/sched/core.c
===================================================================
--- linux-2.6.orig/kernel/sched/core.c
+++ linux-2.6/kernel/sched/core.c
@@ -5167,31 +5167,6 @@ static struct notifier_block __cpuinitda
 	.priority = CPU_PRI_MIGRATION,
 };
 
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
-				      unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-	case CPU_DOWN_FAILED:
-		set_cpu_active((long)hcpu, true);
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		set_cpu_active((long)hcpu, false);
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
 static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
@@ -5203,10 +5178,6 @@ static int __init migration_init(void)
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
 
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
 	return 0;
 }
 early_initcall(migration_init);
@@ -6292,42 +6263,12 @@ static void sched_domains_numa_masks_cle
 	}
 }
 
-/*
- * Update sched_domains_numa_masks[level][node] array when new cpus
- * are onlined.
- */
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		sched_domains_numa_masks_set(cpu);
-		break;
-
-	case CPU_DEAD:
-		sched_domains_numa_masks_clear(cpu);
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
 #else
-static inline void sched_init_numa(void)
-{
-}
-
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	return 0;
-}
+static inline void sched_init_numa(void) { }
+#ifdef CONFIG_HOTPLUG_CPU
+static void sched_domains_numa_masks_set(int cpu) { }
+static void sched_domains_numa_masks_clear(int cpu) { }
+#endif
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
@@ -6696,6 +6637,7 @@ match2:
 	mutex_unlock(&sched_domains_mutex);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
 
 /*
@@ -6706,13 +6648,9 @@ static int num_cpus_frozen;	/* used to m
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+static void cpuset_cpu_active(void)
 {
-	switch (action) {
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED_FROZEN:
-
+	if (cpuhp_tasks_frozen) {
 		/*
 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
 		 * resume sequence. As long as this is not the last online
@@ -6722,40 +6660,62 @@ static int cpuset_cpu_active(struct noti
 		num_cpus_frozen--;
 		if (likely(num_cpus_frozen)) {
 			partition_sched_domains(1, NULL, NULL);
-			break;
+			return;
 		}
-
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
-
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		cpuset_update_active_cpus(true);
-		break;
-	default:
-		return NOTIFY_DONE;
 	}
-	return NOTIFY_OK;
+	cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
-			       void *hcpu)
+static void cpuset_cpu_inactive(void)
 {
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		cpuset_update_active_cpus(false);
-		break;
-	case CPU_DOWN_PREPARE_FROZEN:
+	if (cpuhp_tasks_frozen) {
 		num_cpus_frozen++;
 		partition_sched_domains(1, NULL, NULL);
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
-	return NOTIFY_OK;
+	} else
+		cpuset_update_active_cpus(false);
+}
+
+static int sched_dead_cpu(unsigned int cpu)
+{
+	sched_domains_numa_masks_clear(cpu);
+	return 0;
+}
+
+static int sched_online_cpu(unsigned int cpu)
+{
+	/* Looks redundant, but we need it in case of down canceled */
+	set_cpu_active(cpu, true);
+	/*
+	 * Asymmetric to sched_dead_cpu, but this just fiddles with
+	 * bits. Sigh.
+	 */
+	sched_domains_numa_masks_set(cpu);
+	/* This is actually symmetric */
+	cpuset_cpu_active();
+	return 0;
+}
+
+static int sched_offline_cpu(unsigned int cpu)
+{
+	set_cpu_active(cpu, false);
+	cpuset_cpu_inactive();
+	return 0;
+}
+#else
+#define sched_dead_cpu		NULL
+#define sched_online_cpu	NULL
+#define sched_offline_cpu	NULL
+#endif
+
+int __cpuinit sched_starting_cpu(unsigned int cpu)
+{
+	set_cpu_active(cpu, true);
+	return 0;
 }
 
 void __init sched_init_smp(void)
@@ -6776,9 +6736,13 @@ void __init sched_init_smp(void)
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
-	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+	/*
+	 * Note: These callbacks are installed late because we init
+	 * numa and sched domains after we brought up the cpus.
+	 */
+	cpuhp_setup_state_nocalls(CPUHP_SCHED_DEAD, NULL, sched_dead_cpu);
+	cpuhp_setup_state_nocalls(CPUHP_SCHED_ONLINE, sched_online_cpu,
+				  sched_offline_cpu);
 
 	/* RT runtime code needs to handle some hotplug events */
 	hotcpu_notifier(update_runtime, 0);



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 11/40] x86: uncore: Move teardown callback to CPU_DEAD
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (9 preceding siblings ...)
  2013-01-31 12:11 ` [patch 09/40] cpu: hotplug: Implement setup/removal interface Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 12/40] x86: uncore: Convert to hotplug state machine Thomas Gleixner
                   ` (30 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: x86-uncore-refactor-hotplug-notifiers.patch --]
[-- Type: text/plain, Size: 919 bytes --]

No point calling this from the dying cpu.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/perf_event_intel_uncore.c |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

Index: linux-2.6/arch/x86/kernel/cpu/perf_event_intel_uncore.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2622,7 +2622,7 @@ static void __init uncore_pci_exit(void)
 	}
 }
 
-static void __cpuinit uncore_cpu_dying(int cpu)
+static void __cpuinit uncore_cpu_dead(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -2803,8 +2803,8 @@ static int
 		uncore_cpu_starting(cpu);
 		break;
 	case CPU_UP_CANCELED:
-	case CPU_DYING:
-		uncore_cpu_dying(cpu);
+	case CPU_DEAD:
+		uncore_cpu_dead(cpu);
 		break;
 	default:
 		break;



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 12/40] x86: uncore: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (10 preceding siblings ...)
  2013-01-31 12:11 ` [patch 11/40] x86: uncore: Move teardown callback to CPU_DEAD Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 13/40] perf: " Thomas Gleixner
                   ` (29 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-convert-intel-uncore.patch --]
[-- Type: text/plain, Size: 6159 bytes --]

Convert the notifiers to state machine states and let the core code do
the setup for the already online cpus. The uncore notifier has a
completely undocumented ordering requirement versus the perf core,
hardcoded in the notifier priority. Move the callback to the proper
place in the state
machine.

Note: the original code did not check the return values of the setup
functions, and I could not be bothered to twist my brain around undoing
the previous steps. Marked with a FIXME.
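
A possible shape for the missing error handling (a sketch only, and it
still leaves the uncore_types_init() unwind problem unsolved) would be
to tear down the already installed states, assuming a
cpuhp_remove_state() counterpart to cpuhp_setup_state():

	ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
				uncore_prepare_cpu, uncore_dead_cpu);
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
				uncore_starting_cpu, NULL);
	if (ret)
		goto err_prep;

	ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_ONLINE,
				uncore_online_cpu, uncore_offline_cpu);
	if (ret)
		goto err_starting;
	return 0;

err_starting:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_STARTING);
err_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_UNCORE_PREP);
	return ret;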

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/perf_event_intel_uncore.c |  109 ++++++--------------------
 include/linux/cpuhotplug.h                    |    3 
 2 files changed, 30 insertions(+), 82 deletions(-)

Index: linux-2.6/arch/x86/kernel/cpu/perf_event_intel_uncore.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2622,7 +2622,7 @@ static void __init uncore_pci_exit(void)
 	}
 }
 
-static void __cpuinit uncore_cpu_dead(int cpu)
+static int __cpuinit uncore_dead_cpu(unsigned int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -2639,9 +2639,11 @@ static void __cpuinit uncore_cpu_dead(in
 				kfree(box);
 		}
 	}
+	return 0;
 }
 
-static int __cpuinit uncore_cpu_starting(int cpu)
+/* Must run on the target cpu */
+static int __cpuinit uncore_starting_cpu(unsigned int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -2681,12 +2683,12 @@ static int __cpuinit uncore_cpu_starting
 	return 0;
 }
 
-static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+static int __cpuinit uncore_prepare_cpu(unsigned int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, j;
+	int i, j, phys_id = -1;
 
 	for (i = 0; msr_uncores[i]; i++) {
 		type = msr_uncores[i];
@@ -2745,13 +2747,13 @@ uncore_change_context(struct intel_uncor
 	}
 }
 
-static void __cpuinit uncore_event_exit_cpu(int cpu)
+static int __cpuinit uncore_offline_cpu(unsigned int cpu)
 {
 	int i, phys_id, target;
 
 	/* if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return;
+		return 0;
 
 	/* find a new cpu to collect uncore events */
 	phys_id = topology_physical_package_id(cpu);
@@ -2771,78 +2773,29 @@ static void __cpuinit uncore_event_exit_
 
 	uncore_change_context(msr_uncores, cpu, target);
 	uncore_change_context(pci_uncores, cpu, target);
+	return 0;
 }
 
-static void __cpuinit uncore_event_init_cpu(int cpu)
+static int __cpuinit uncore_online_cpu(unsigned int cpu)
 {
 	int i, phys_id;
 
 	phys_id = topology_physical_package_id(cpu);
 	for_each_cpu(i, &uncore_cpu_mask) {
 		if (phys_id == topology_physical_package_id(i))
-			return;
+			return 0;
 	}
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
 	uncore_change_context(msr_uncores, -1, cpu);
 	uncore_change_context(pci_uncores, -1, cpu);
-}
-
-static int
- __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	/* allocate/free data structure for uncore box */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		uncore_cpu_prepare(cpu, -1);
-		break;
-	case CPU_STARTING:
-		uncore_cpu_starting(cpu);
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_DEAD:
-		uncore_cpu_dead(cpu);
-		break;
-	default:
-		break;
-	}
-
-	/* select the cpu that collects uncore events */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_FAILED:
-	case CPU_STARTING:
-		uncore_event_init_cpu(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		uncore_event_exit_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
-	.notifier_call	= uncore_cpu_notifier,
-	/*
-	 * to migrate uncore events, our notifier should be executed
-	 * before perf core's notifier.
-	 */
-	.priority	= CPU_PRI_PERF + 1,
-};
-
-static void __init uncore_cpu_setup(void *dummy)
-{
-	uncore_cpu_starting(smp_processor_id());
+	return 0;
 }
 
 static int __init uncore_cpu_init(void)
 {
-	int ret, cpu, max_cores;
+	int ret, max_cores;
 
 	max_cores = boot_cpu_data.x86_max_cores;
 	switch (boot_cpu_data.x86_model) {
@@ -2879,28 +2832,20 @@ static int __init uncore_cpu_init(void)
 	if (ret)
 		return ret;
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu) {
-		int i, phys_id = topology_physical_package_id(cpu);
-
-		for_each_cpu(i, &uncore_cpu_mask) {
-			if (phys_id == topology_physical_package_id(i)) {
-				phys_id = -1;
-				break;
-			}
-		}
-		if (phys_id < 0)
-			continue;
-
-		uncore_cpu_prepare(cpu, phys_id);
-		uncore_event_init_cpu(cpu);
-	}
-	on_each_cpu(uncore_cpu_setup, NULL, 1);
-
-	register_cpu_notifier(&uncore_cpu_nb);
-
-	put_online_cpus();
+	/*
+	 * Install callbacks. Core will call them for each online
+	 * cpu.
+	 *
+	 * FIXME: This should check the return value, but the original
+	 * code did not do that either and I have no idea how to undo
+	 * uncore_types_init(). Brilliant stuff that, isn't it?
+	 */
+	cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, uncore_prepare_cpu,
+			  uncore_dead_cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+			  uncore_starting_cpu, NULL);
+	cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_ONLINE, uncore_online_cpu,
+			  uncore_offline_cpu);
 
 	return 0;
 }
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -4,12 +4,14 @@
 enum cpuhp_states {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
+	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_SCHED_DEAD,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
+	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_MAX,
@@ -18,6 +20,7 @@ enum cpuhp_states {
 	CPUHP_SCHED_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
+	CPUHP_PERF_X86_UNCORE_ONLINE,
 	CPUHP_MAX,
 };
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 13/40] perf: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (11 preceding siblings ...)
  2013-01-31 12:11 ` [patch 12/40] x86: uncore: Convert to hotplug state machine Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 14/40] x86: perf: Convert the core to the " Thomas Gleixner
                   ` (28 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-perf-core-convert.patch --]
[-- Type: text/plain, Size: 4936 bytes --]

Actually a nice symmetric startup/teardown pair which fits properly
into the state machine concept. In the long run we should be able to invoke
the startup callback for the boot cpu via the state machine and get
rid of the init function which invokes it on the boot cpu.
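
Schematically, that long term cleanup would replace the explicit

	perf_event_init_cpu(smp_processor_id());

call at the end of perf_event_init() with a registration along the
lines of

	/* Hypothetical; today the callbacks sit statically in the
	 * cpuhp_bp_states[] table instead */
	cpuhp_setup_state(CPUHP_PERF_PREPARE, perf_event_init_cpu,
			  perf_event_exit_cpu);

which, with invoke=true, runs perf_event_init_cpu() on every cpu whose
state is already past CPUHP_PERF_PREPARE, including the boot cpu.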

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |   11 +++++++++++
 kernel/cpu.c               |    8 ++++++++
 kernel/events/core.c       |   36 +++++++-----------------------------
 3 files changed, 26 insertions(+), 29 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -5,6 +5,7 @@ enum cpuhp_states {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_X86_UNCORE_PREP,
+	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_SCHED_DEAD,
@@ -18,6 +19,7 @@ enum cpuhp_states {
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_PERCPU_THREADS,
 	CPUHP_SCHED_ONLINE,
+	CPUHP_PERF_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
@@ -96,4 +98,13 @@ static inline void cpuhp_remove_state_no
 /* Compiled in scheduler hotplug functions */
 int sched_starting_cpu(unsigned int cpu);
 
+ /* Performance counter hotplug functions */
+#ifdef CONFIG_PERF_EVENTS
+int perf_event_init_cpu(unsigned int cpu);
+int perf_event_exit_cpu(unsigned int cpu);
+#else
+#define perf_event_init_cpu	NULL
+#define perf_event_exit_cpu	NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -750,6 +750,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = smpboot_create_threads,
 		.teardown = NULL,
 	},
+	[CPUHP_PERF_PREPARE] = {
+		.startup = perf_event_init_cpu,
+		.teardown = perf_event_exit_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -770,6 +774,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = smpboot_unpark_threads,
 		.teardown = smpboot_park_threads,
 	},
+	[CPUHP_PERF_ONLINE] = {
+		.startup = perf_event_init_cpu,
+		.teardown = perf_event_exit_cpu,
+	},
 	[CPUHP_NOTIFY_ONLINE] = {
 		.startup = notify_online,
 		.teardown = NULL,
Index: linux-2.6/kernel/events/core.c
===================================================================
--- linux-2.6.orig/kernel/events/core.c
+++ linux-2.6/kernel/events/core.c
@@ -5261,7 +5261,7 @@ static int swevent_hlist_get_cpu(struct 
 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
 		struct swevent_hlist *hlist;
 
-		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
 		if (!hlist) {
 			err = -ENOMEM;
 			goto exit;
@@ -7263,12 +7263,12 @@ static void __init perf_event_init_all_c
 	}
 }
 
-static void __cpuinit perf_event_init_cpu(int cpu)
+int __cpuinit perf_event_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0) {
+	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -7276,6 +7276,7 @@ static void __cpuinit perf_event_init_cp
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
+	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
@@ -7318,7 +7319,7 @@ static void perf_event_exit_cpu_context(
 	srcu_read_unlock(&pmus_srcu, idx);
 }
 
-static void perf_event_exit_cpu(int cpu)
+int perf_event_exit_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -7327,6 +7328,7 @@ static void perf_event_exit_cpu(int cpu)
 	mutex_unlock(&swhash->hlist_mutex);
 
 	perf_event_exit_cpu_context(cpu);
+	return 0;
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
@@ -7352,30 +7354,6 @@ static struct notifier_block perf_reboot
 	.priority = INT_MIN,
 };
 
-static int __cpuinit
-perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-	case CPU_DOWN_FAILED:
-		perf_event_init_cpu(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DOWN_PREPARE:
-		perf_event_exit_cpu(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -7388,7 +7366,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
-	perf_cpu_notifier(perf_cpu_notify);
+	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 14/40] x86: perf: Convert the core to the hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (12 preceding siblings ...)
  2013-01-31 12:11 ` [patch 13/40] perf: " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 16/40] blackfin: perf: Convert hotplug notifier to " Thomas Gleixner
                   ` (27 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-x86-perf-core-convert.patch --]
[-- Type: text/plain, Size: 5383 bytes --]

Replace the perf_cpu_notifier() install mechanism, which magically
invokes the callback on the current cpu. Convert the hardware-specific
callbacks which are invoked from the x86 perf core to return proper
error codes instead of totally pointless NOTIFY_BAD return values.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/perf_event.c       |   78 ++++++++++++++++++---------------
 arch/x86/kernel/cpu/perf_event_amd.c   |    6 +-
 arch/x86/kernel/cpu/perf_event_intel.c |    6 +-
 include/linux/cpuhotplug.h             |    3 +
 4 files changed, 52 insertions(+), 41 deletions(-)

Index: linux-2.6/arch/x86/kernel/cpu/perf_event.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event.c
@@ -1252,47 +1252,45 @@ perf_event_nmi_handler(unsigned int cmd,
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int __cpuinit
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int __cpuinit x86_pmu_prepare_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (long)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	int ret = NOTIFY_OK;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		cpuc->kfree_on_online = NULL;
-		if (x86_pmu.cpu_prepare)
-			ret = x86_pmu.cpu_prepare(cpu);
-		break;
-
-	case CPU_STARTING:
-		if (x86_pmu.attr_rdpmc)
-			set_in_cr4(X86_CR4_PCE);
-		if (x86_pmu.cpu_starting)
-			x86_pmu.cpu_starting(cpu);
-		break;
+	cpuc->kfree_on_online = NULL;
+	if (x86_pmu.cpu_prepare)
+		return x86_pmu.cpu_prepare(cpu);
+	return 0;
+}
 
-	case CPU_ONLINE:
-		kfree(cpuc->kfree_on_online);
-		break;
+static int __cpuinit x86_pmu_dead_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dead)
+		x86_pmu.cpu_dead(cpu);
+	return 0;
+}
 
-	case CPU_DYING:
-		if (x86_pmu.cpu_dying)
-			x86_pmu.cpu_dying(cpu);
-		break;
+static int __cpuinit x86_pmu_online_cpu(unsigned int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
-	case CPU_UP_CANCELED:
-	case CPU_DEAD:
-		if (x86_pmu.cpu_dead)
-			x86_pmu.cpu_dead(cpu);
-		break;
+	kfree(cpuc->kfree_on_online);
+	return 0;
+}
 
-	default:
-		break;
-	}
+static int __cpuinit x86_pmu_starting_cpu(unsigned int cpu)
+{
+	if (x86_pmu.attr_rdpmc)
+		set_in_cr4(X86_CR4_PCE);
+	if (x86_pmu.cpu_starting)
+		x86_pmu.cpu_starting(cpu);
+	return 0;
+}
 
-	return ret;
+static int __cpuinit x86_pmu_dying_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dying)
+		x86_pmu.cpu_dying(cpu);
+	return 0;
 }
 
 static void __init pmu_check_apic(void)
@@ -1485,8 +1483,18 @@ static int __init init_hw_perf_events(vo
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(x86_pmu_notifier);
-
+	/*
+	 * Install callbacks. Core will call them for each online
+	 * cpu.
+	 *
+	 * FIXME: This should check the return value, but the original
+	 * code did not do that either....
+	 */
+	cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, x86_pmu_prepare_cpu,
+			  x86_pmu_dead_cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING, x86_pmu_starting_cpu,
+			  x86_pmu_dying_cpu);
+	cpuhp_setup_state(CPUHP_PERF_X86_ONLINE, x86_pmu_online_cpu, NULL);
 	return 0;
 }
 early_initcall(init_hw_perf_events);
Index: linux-2.6/arch/x86/kernel/cpu/perf_event_amd.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event_amd.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event_amd.c
@@ -349,13 +349,13 @@ static int amd_pmu_cpu_prepare(int cpu)
 	WARN_ON_ONCE(cpuc->amd_nb);
 
 	if (boot_cpu_data.x86_max_cores < 2)
-		return NOTIFY_OK;
+		return 0;
 
 	cpuc->amd_nb = amd_alloc_nb(cpu);
 	if (!cpuc->amd_nb)
-		return NOTIFY_BAD;
+		return -ENOMEM;
 
-	return NOTIFY_OK;
+	return 0;
 }
 
 static void amd_pmu_cpu_starting(int cpu)
Index: linux-2.6/arch/x86/kernel/cpu/perf_event_intel.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event_intel.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1662,13 +1662,13 @@ static int intel_pmu_cpu_prepare(int cpu
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
 	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
-		return NOTIFY_OK;
+		return 0;
 
 	cpuc->shared_regs = allocate_shared_regs(cpu);
 	if (!cpuc->shared_regs)
-		return NOTIFY_BAD;
+		return -ENOMEM;
 
-	return NOTIFY_OK;
+	return 0;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -5,6 +5,7 @@ enum cpuhp_states {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_X86_UNCORE_PREP,
+	CPUHP_PERF_X86_PREPARE,
 	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
@@ -13,6 +14,7 @@ enum cpuhp_states {
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_PERF_X86_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_MAX,
@@ -23,6 +25,7 @@ enum cpuhp_states {
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
+	CPUHP_PERF_X86_ONLINE,
 	CPUHP_MAX,
 };
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 15/40] x86: perf: Convert AMD IBS to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (14 preceding siblings ...)
  2013-01-31 12:11 ` [patch 16/40] blackfin: perf: Convert hotplug notifier to " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 17/40] powerpc: perf: Convert book3s notifier to state machine callbacks Thomas Gleixner
                   ` (25 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: x86-perf-amd-ibs-convert.patch --]
[-- Type: text/plain, Size: 3669 bytes --]

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online cpus.
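
"Let the core invoke the callbacks" means the registration below takes
over what the smp_call_function(setup_APIC_ibs, NULL, 1) call did by
hand. Conceptually (simplified from __cpuhp_setup_state() earlier in
the series, glossing over the detail that a STARTING callback has to
run on the target cpu):

	for_each_present_cpu(cpu) {
		if (per_cpu(cpuhp_state, cpu) >= CPUHP_AP_PERF_X86_AMD_IBS_STARTING)
			cpuhp_issue_call(cpu, CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
					 x86_pmu_amd_ibs_starting_cpu, true);
	}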

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/perf_event_amd_ibs.c |   54 +++++++++++--------------------
 include/linux/cpuhotplug.h               |    1 
 2 files changed, 21 insertions(+), 34 deletions(-)

Index: linux-2.6/arch/x86/kernel/cpu/perf_event_amd_ibs.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ linux-2.6/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -637,13 +637,10 @@ static __init int perf_ibs_pmu_init(stru
 	return ret;
 }
 
-static __init int perf_event_ibs_init(void)
+static __init void perf_event_ibs_init(void)
 {
 	struct attribute **attr = ibs_op_format_attrs;
 
-	if (!ibs_caps)
-		return -ENODEV;	/* ibs not supported by the cpu */
-
 	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
 	if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -654,13 +651,11 @@ static __init int perf_event_ibs_init(vo
 
 	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
 	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
-	return 0;
 }
 
 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
 
-static __init int perf_event_ibs_init(void) { return 0; }
+static __init void perf_event_ibs_init(void) { }
 
 #endif
 
@@ -827,11 +822,10 @@ static inline int get_ibs_lvt_offset(voi
 	return val & IBSCTL_LVT_OFFSET_MASK;
 }
 
-static void setup_APIC_ibs(void *dummy)
+static void setup_APIC_ibs(void)
 {
-	int offset;
+	int offset = get_ibs_lvt_offset();
 
-	offset = get_ibs_lvt_offset();
 	if (offset < 0)
 		goto failed;
 
@@ -842,30 +836,19 @@ failed:
 		smp_processor_id());
 }
 
-static void clear_APIC_ibs(void *dummy)
+static int __cpuinit x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
 {
-	int offset;
-
-	offset = get_ibs_lvt_offset();
-	if (offset >= 0)
-		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+	setup_APIC_ibs();
+	return 0;
 }
 
-static int __cpuinit
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int __cpuinit x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		setup_APIC_ibs(NULL);
-		break;
-	case CPU_DYING:
-		clear_APIC_ibs(NULL);
-		break;
-	default:
-		break;
-	}
+	int offset = get_ibs_lvt_offset();
 
-	return NOTIFY_OK;
+	if (offset >= 0)
+		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+	return 0;
 }
 
 static __init int amd_ibs_init(void)
@@ -889,15 +872,18 @@ static __init int amd_ibs_init(void)
 	if (!ibs_eilvt_valid())
 		goto out;
 
-	get_online_cpus();
 	ibs_caps = caps;
 	/* make ibs_caps visible to other cpus: */
 	smp_mb();
-	perf_cpu_notifier(perf_ibs_cpu_notifier);
-	smp_call_function(setup_APIC_ibs, NULL, 1);
-	put_online_cpus();
+	/*
+	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
+	 * all online cpus.
+	 */
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+			  x86_pmu_amd_ibs_starting_cpu,
+			  x86_pmu_amd_ibs_dying_cpu);
 
-	ret = perf_event_ibs_init();
+	perf_event_ibs_init();
 out:
 	if (ret)
 		pr_err("Failed to setup IBS, %d\n", ret);
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -14,6 +14,7 @@ enum cpuhp_states {
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_PERF_X86_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 16/40] blackfin: perf: Convert hotplug notifier to state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (13 preceding siblings ...)
  2013-01-31 12:11 ` [patch 14/40] x86: perf: Convert the core to the " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 15/40] x86: perf: Convert AMD IBS to hotplug " Thomas Gleixner
                   ` (26 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: blackfin-convert-perf-notifier.patch --]
[-- Type: text/plain, Size: 1819 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/blackfin/kernel/perf_event.c |   25 ++++---------------------
 include/linux/cpuhotplug.h        |    1 +
 2 files changed, 5 insertions(+), 21 deletions(-)

Index: linux-2.6/arch/blackfin/kernel/perf_event.c
===================================================================
--- linux-2.6.orig/arch/blackfin/kernel/perf_event.c
+++ linux-2.6/arch/blackfin/kernel/perf_event.c
@@ -461,29 +461,13 @@ static struct pmu pmu = {
 	.read        = bfin_pmu_read,
 };
 
-static void bfin_pmu_setup(int cpu)
+int __cpuinit bfin_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
+	bfin_write_PFCTL(0);
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int __cpuinit
-bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		bfin_write_PFCTL(0);
-		bfin_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int __init bfin_pmu_init(void)
@@ -492,8 +476,7 @@ static int __init bfin_pmu_init(void)
 
 	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	if (!ret)
-		perf_cpu_notifier(bfin_pmu_notifier);
-
+		cpuhp_setup_state(CPUHP_PERF_BFIN, bfin_pmu_prepare_cpu, NULL);
 	return ret;
 }
 early_initcall(bfin_pmu_init);
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -6,6 +6,7 @@ enum cpuhp_states {
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_PREPARE,
+	CPUHP_PERF_BFIN,
 	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 17/40] powerpc: perf: Convert book3s notifier to state machine callbacks
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (15 preceding siblings ...)
  2013-01-31 12:11 ` [patch 15/40] x86: perf: Convert AMD IBS to hotplug " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 18/40] s390: perf: Convert the hotplug " Thomas Gleixner
                   ` (24 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: power-cpu-perf-hotplug-convert.patch --]
[-- Type: text/plain, Size: 1890 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/perf/core-book3s.c |   29 ++++++-----------------------
 include/linux/cpuhotplug.h      |    1 +
 2 files changed, 7 insertions(+), 23 deletions(-)

Index: linux-2.6/arch/powerpc/perf/core-book3s.c
===================================================================
--- linux-2.6.orig/arch/powerpc/perf/core-book3s.c
+++ linux-2.6/arch/powerpc/perf/core-book3s.c
@@ -1501,31 +1501,15 @@ static void perf_event_interrupt(struct 
 		irq_exit();
 }
 
-static void power_pmu_setup(int cpu)
+int __cpuinit power_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	if (!ppmu)
-		return;
-	memset(cpuhw, 0, sizeof(*cpuhw));
-	cpuhw->mmcr[0] = MMCR0_FC;
-}
-
-static int __cpuinit
-power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		power_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
+	if (ppmu) {
+		memset(cpuhw, 0, sizeof(*cpuhw));
+		cpuhw->mmcr[0] = MMCR0_FC;
 	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 int __cpuinit register_power_pmu(struct power_pmu *pmu)
@@ -1546,7 +1530,6 @@ int __cpuinit register_power_pmu(struct 
 #endif /* CONFIG_PPC64 */
 
 	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(power_pmu_notifier);
-
+	cpuhp_setup_state(CPUHP_PERF_POWER, power_pmu_prepare_cpu, NULL);
 	return 0;
 }
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -7,6 +7,7 @@ enum cpuhp_states {
 	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_PREPARE,
 	CPUHP_PERF_BFIN,
+	CPUHP_PERF_POWER,
 	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 18/40] s390: perf: Convert the hotplug notifier to state machine callbacks
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (16 preceding siblings ...)
  2013-01-31 12:11 ` [patch 17/40] powerpc: perf: Convert book3s notifier to state machine callbacks Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 19/40] sh: perf: Convert the hotplug notifiers " Thomas Gleixner
                   ` (23 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: s390-perf-cpu-hotplug.patch --]
[-- Type: text/plain, Size: 2425 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/s390/kernel/perf_cpum_cf.c |   37 +++++++++++++++----------------------
 include/linux/cpuhotplug.h      |    1 +
 2 files changed, 16 insertions(+), 22 deletions(-)

Index: linux-2.6/arch/s390/kernel/perf_cpum_cf.c
===================================================================
--- linux-2.6.orig/arch/s390/kernel/perf_cpum_cf.c
+++ linux-2.6/arch/s390/kernel/perf_cpum_cf.c
@@ -640,26 +640,20 @@ static struct pmu cpumf_pmu = {
 	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int __cpuinit cpumf_pmf_setup(unsigned int cpu, int flags)
 {
-	unsigned int cpu = (long) hcpu;
-	int flags;
+	smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+	return 0;
+}
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		flags = PMC_INIT;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
-		break;
-	case CPU_DOWN_PREPARE:
-		flags = PMC_RELEASE;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
-		break;
-	default:
-		break;
-	}
+static int __cpuinit s390_pmu_online_cpu(unsigned int cpu)
+{
+	return cpumf_pmf_setup(cpu, PMC_INIT);
+}
 
-	return NOTIFY_OK;
+static int __cpuinit s390_pmu_offline_cpu(unsigned int cpu)
+{
+	return cpumf_pmf_setup(cpu, PMC_RELEASE);
 }
 
 static int __init cpumf_pmu_init(void)
@@ -678,17 +672,16 @@ static int __init cpumf_pmu_init(void)
 	if (rc) {
 		pr_err("Registering for CPU-measurement alerts "
 		       "failed with rc=%i\n", rc);
-		goto out;
+		return rc;
 	}
 
 	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
 	if (rc) {
 		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
 		unregister_external_interrupt(0x1407, cpumf_measurement_alert);
-		goto out;
+		return rc;
 	}
-	perf_cpu_notifier(cpumf_pmu_notifier);
-out:
-	return rc;
+	return cpuhp_setup_state(CPUHP_PERF_S390_ONLINE, s390_pmu_online_cpu,
+				 s390_pmu_offline_cpu);
 }
 early_initcall(cpumf_pmu_init);
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -29,6 +29,7 @@ enum cpuhp_states {
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
 	CPUHP_PERF_X86_ONLINE,
+	CPUHP_PERF_S390_ONLINE,
 	CPUHP_MAX,
 };
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 19/40] sh: perf: Convert the hotplug notifiers to state machine callbacks
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (17 preceding siblings ...)
  2013-01-31 12:11 ` [patch 18/40] s390: perf: Convert the hotplug " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 21/40] sched: Convert the migration callback to hotplug states Thomas Gleixner
                   ` (22 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: sh-perf-notifier-convert.patch --]
[-- Type: text/plain, Size: 1719 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/sh/kernel/perf_event.c |   22 +++-------------------
 include/linux/cpuhotplug.h  |    1 +
 2 files changed, 4 insertions(+), 19 deletions(-)

Index: linux-2.6/arch/sh/kernel/perf_event.c
===================================================================
--- linux-2.6.orig/arch/sh/kernel/perf_event.c
+++ linux-2.6/arch/sh/kernel/perf_event.c
@@ -360,28 +360,12 @@ static struct pmu pmu = {
 	.read		= sh_pmu_read,
 };
 
-static void sh_pmu_setup(int cpu)
+static int __cpuinit sh_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int __cpuinit
-sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		sh_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
@@ -395,6 +379,6 @@ int __cpuinit register_sh_pmu(struct sh_
 	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(sh_pmu_notifier);
+	cpuhp_setup_state(CPUHP_PERF_SUPERH, sh_pmu_prepare_cpu, NULL);
 	return 0;
 }
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_states {
 	CPUHP_PERF_X86_PREPARE,
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
+	CPUHP_PERF_SUPERH,
 	CPUHP_PERF_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 21/40] sched: Convert the migration callback to hotplug states
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (18 preceding siblings ...)
  2013-01-31 12:11 ` [patch 19/40] sh: perf: Convert the hotplug notifiers " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 20/40] perf: Remove perf cpu notifier code Thomas Gleixner
                   ` (21 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-sched-convert-migration.patch --]
[-- Type: text/plain, Size: 6762 bytes --]

It's not clear why the ordering needs to be this way, but for the time
being we just keep the current working state. This wants to be revisited.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    2 
 include/linux/cpuhotplug.h |   12 +++++
 kernel/cpu.c               |   12 +++++
 kernel/sched/core.c        |  103 +++++++++++++++++++--------------------------
 4 files changed, 68 insertions(+), 61 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -58,8 +58,6 @@ extern ssize_t arch_print_cpu_modalias(s
  * CPU notifier priorities.
  */
 enum {
-	CPU_PRI_MIGRATION	= 10,
-	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
 	CPU_PRI_WORKQUEUE_DOWN	= -5,
 };
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -10,6 +10,7 @@ enum cpuhp_states {
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
 	CPUHP_PERF_PREPARE,
+	CPUHP_SCHED_MIGRATE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_SCHED_DEAD,
@@ -21,11 +22,13 @@ enum cpuhp_states {
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_SCHED_MIGRATE_DYING,
 	CPUHP_AP_MAX,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_PERCPU_THREADS,
 	CPUHP_SCHED_ONLINE,
 	CPUHP_PERF_ONLINE,
+	CPUHP_SCHED_MIGRATE_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
@@ -105,6 +108,15 @@ static inline void cpuhp_remove_state_no
 
 /* Compiled in scheduler hotplug functions */
 int sched_starting_cpu(unsigned int cpu);
+int sched_migration_prepare_cpu(unsigned int cpu);
+int sched_migration_online_cpu(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int sched_migration_dying_cpu(unsigned int cpu);
+int sched_migration_dead_cpu(unsigned int cpu);
+#else
+#define sched_migration_dying_cpu	NULL
+#define sched_migration_dead_cpu	NULL
+#endif
 
  /* Performance counter hotplug functions */
 #ifdef CONFIG_PERF_EVENTS
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -754,6 +754,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = perf_event_init_cpu,
 		.teardown = perf_event_exit_cpu,
 	},
+	[CPUHP_SCHED_MIGRATE_PREP] = {
+		.startup = sched_migration_prepare_cpu,
+		.teardown = sched_migration_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -778,6 +782,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = perf_event_init_cpu,
 		.teardown = perf_event_exit_cpu,
 	},
+	[CPUHP_SCHED_MIGRATE_ONLINE] = {
+		.startup = sched_migration_online_cpu,
+		.teardown = NULL,
+	},
 	[CPUHP_NOTIFY_ONLINE] = {
 		.startup = notify_online,
 		.teardown = NULL,
@@ -808,6 +816,10 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = NULL,
 		.teardown = notify_dying,
 	},
+	[CPUHP_AP_SCHED_MIGRATE_DYING] = {
+		.startup = NULL,
+		.teardown = sched_migration_dying_cpu,
+	},
 #endif
 	[CPUHP_MAX] = {
 		.startup = NULL,
Index: linux-2.6/kernel/sched/core.c
===================================================================
--- linux-2.6.orig/kernel/sched/core.c
+++ linux-2.6/kernel/sched/core.c
@@ -5104,80 +5104,65 @@ static void set_rq_offline(struct rq *rq
 	}
 }
 
-/*
- * migration_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int __cpuinit
-migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int __cpuinit sched_migration_prepare_cpu(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	unsigned long flags;
 	struct rq *rq = cpu_rq(cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		rq->calc_load_update = calc_load_update;
-		break;
+	rq->calc_load_update = calc_load_update;
+	update_max_interval();
+	return 0;
+}
 
-	case CPU_ONLINE:
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+int __cpuinit sched_migration_online_cpu(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
-			set_rq_online(rq);
-		}
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
+	/* Update our root-domain */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	update_max_interval();
+	return 0;
+}
 
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DYING:
-		sched_ttwu_pending();
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-			set_rq_offline(rq);
-		}
-		migrate_tasks(cpu);
-		BUG_ON(rq->nr_running != 1); /* the migration thread */
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
+int __cpuinit sched_migration_dying_cpu(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
-	case CPU_DEAD:
-		calc_load_migrate(rq);
-		break;
-#endif
+	sched_ttwu_pending();
+	/* Update our root-domain */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
 	}
-
+	migrate_tasks(cpu);
+	BUG_ON(rq->nr_running != 1); /* the migration thread */
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 	update_max_interval();
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-/*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
-static struct notifier_block __cpuinitdata migration_notifier = {
-	.notifier_call = migration_call,
-	.priority = CPU_PRI_MIGRATION,
-};
-
-static int __init migration_init(void)
+int __cpuinit sched_migration_dead_cpu(unsigned int cpu)
 {
-	void *cpu = (void *)(long)smp_processor_id();
-	int err;
+	struct rq *rq = cpu_rq(cpu);
 
-	/* Initialize migration for the boot CPU */
-	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-	BUG_ON(err == NOTIFY_BAD);
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
+	calc_load_migrate(rq);
+	update_max_interval();
+	return 0;
+}
+#endif
 
+static int __init migration_init(void)
+{
+	sched_migration_prepare_cpu(smp_processor_id());
+	sched_migration_online_cpu(smp_processor_id());
 	return 0;
 }
 early_initcall(migration_init);



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 20/40] perf: Remove perf cpu notifier code
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (19 preceding siblings ...)
  2013-01-31 12:11 ` [patch 21/40] sched: Convert the migration callback to hotplug states Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 22/40] workqueue: Convert to state machine callbacks Thomas Gleixner
                   ` (20 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: perf-remove-perf-cpu-notifier.patch --]
[-- Type: text/plain, Size: 1890 bytes --]

All users converted to state machine callbacks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    2 --
 include/linux/perf_event.h |   21 ---------------------
 2 files changed, 23 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -58,8 +58,6 @@ extern ssize_t arch_print_cpu_modalias(s
  * CPU notifier priorities.
  */
 enum {
-	/* migration should happen before other stuff but after perf */
-	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
Index: linux-2.6/include/linux/perf_event.h
===================================================================
--- linux-2.6.orig/include/linux/perf_event.h
+++ linux-2.6/include/linux/perf_event.h
@@ -796,27 +796,6 @@ static inline void perf_event_task_tick(
 
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
-/*
- * This has to have a higher priority than migration_notifier in sched.c.
- */
-#define perf_cpu_notifier(fn)						\
-do {									\
-	static struct notifier_block fn##_nb __cpuinitdata =		\
-		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
-	unsigned long cpu = smp_processor_id();				\
-	unsigned long flags;						\
-	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_save(flags);						\
-	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_restore(flags);					\
-	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
-		(void *)(unsigned long)cpu);				\
-	register_cpu_notifier(&fn##_nb);				\
-} while (0)
-
-
 #define PMU_FORMAT_ATTR(_name, _format)					\
 static ssize_t								\
 _name##_show(struct device *dev,					\



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 22/40] workqueue: Convert to state machine callbacks
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (20 preceding siblings ...)
  2013-01-31 12:11 ` [patch 20/40] perf: Remove perf cpu notifier code Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 23/40] cpufreq: Convert to hotplug state machine Thomas Gleixner
                   ` (19 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpu-hotplug-workqueue-convert.patch --]
[-- Type: text/plain, Size: 5832 bytes --]

Get rid of the priority ordering of the separate notifiers and use a
proper startup/teardown state callback pair.

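To make the ordering argument concrete, a minimal sketch of how the
core walks the state table, ignoring error handling and the BP/AP
split; CPUHP_MAX and the helper names are illustrative, not the actual
kernel/cpu.c implementation:

	/* Bringup walks the states forward, teardown walks them
	 * backward, so a single enum position replaces the old
	 * CPU_PRI_WORKQUEUE_UP / CPU_PRI_WORKQUEUE_DOWN pair. */
	static void cpuhp_invoke_up(unsigned int cpu)
	{
		int st;

		for (st = 0; st < CPUHP_MAX; st++)
			if (cpuhp_bp_states[st].startup)
				cpuhp_bp_states[st].startup(cpu);
	}

	static void cpuhp_invoke_down(unsigned int cpu)
	{
		int st;

		for (st = CPUHP_MAX - 1; st >= 0; st--)
			if (cpuhp_bp_states[st].teardown)
				cpuhp_bp_states[st].teardown(cpu);
	}
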
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    8 ----
 include/linux/cpuhotplug.h |    7 +++
 kernel/cpu.c               |    8 ++++
 kernel/workqueue.c         |   80 ++++++++++++++++-----------------------------
 4 files changed, 44 insertions(+), 59 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -54,14 +54,6 @@ extern ssize_t arch_print_cpu_modalias(s
 				       char *bufptr);
 #endif
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	CPU_PRI_WORKQUEUE_UP	= 5,
-	CPU_PRI_WORKQUEUE_DOWN	= -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -11,6 +11,7 @@ enum cpuhp_states {
 	CPUHP_PERF_SUPERH,
 	CPUHP_PERF_PREPARE,
 	CPUHP_SCHED_MIGRATE_PREP,
+	CPUHP_WORKQUEUE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_SCHED_DEAD,
@@ -29,6 +30,7 @@ enum cpuhp_states {
 	CPUHP_SCHED_ONLINE,
 	CPUHP_PERF_ONLINE,
 	CPUHP_SCHED_MIGRATE_ONLINE,
+	CPUHP_WORKQUEUE_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
@@ -127,4 +129,9 @@ int perf_event_exit_cpu(unsigned int cpu
 #define perf_event_exit_cpu	NULL
 #endif
 
+/* Workqueue related hotplug events */
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -758,6 +758,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = sched_migration_prepare_cpu,
 		.teardown = sched_migration_dead_cpu,
 	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.startup = workqueue_prepare_cpu,
+		.teardown = NULL,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -786,6 +790,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = sched_migration_online_cpu,
 		.teardown = NULL,
 	},
+	[CPUHP_WORKQUEUE_ONLINE] = {
+		.startup = workqueue_online_cpu,
+		.teardown = workqueue_offline_cpu,
+	},
 	[CPUHP_NOTIFY_ONLINE] = {
 		.startup = notify_online,
 		.teardown = NULL,
Index: linux-2.6/kernel/workqueue.c
===================================================================
--- linux-2.6.orig/kernel/workqueue.c
+++ linux-2.6/kernel/workqueue.c
@@ -3588,67 +3588,48 @@ static void gcwq_unbind_fn(struct work_s
 		atomic_set(get_pool_nr_running(pool), 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int __cpuinit workqueue_prepare_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	struct worker_pool *pool;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_worker_pool(pool, gcwq) {
-			struct worker *worker;
-
-			if (pool->nr_workers)
-				continue;
+	for_each_worker_pool(pool, gcwq) {
+		struct worker *worker;
 
-			worker = create_worker(pool);
-			if (!worker)
-				return NOTIFY_BAD;
+		if (pool->nr_workers)
+			continue;
 
-			spin_lock_irq(&gcwq->lock);
-			start_worker(worker);
-			spin_unlock_irq(&gcwq->lock);
-		}
-		break;
+		worker = create_worker(pool);
+		if (!worker)
+			return -ENOMEM;
 
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		gcwq_claim_assoc_and_lock(gcwq);
-		gcwq->flags &= ~GCWQ_DISASSOCIATED;
-		rebind_workers(gcwq);
-		gcwq_release_assoc_and_unlock(gcwq);
-		break;
+		spin_lock_irq(&gcwq->lock);
+		start_worker(worker);
+		spin_unlock_irq(&gcwq->lock);
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int __cpuinit workqueue_online_cpu(unsigned int cpu)
+{
+	struct global_cwq *gcwq = get_gcwq(cpu);
+
+	gcwq_claim_assoc_and_lock(gcwq);
+	gcwq->flags &= ~GCWQ_DISASSOCIATED;
+	rebind_workers(gcwq);
+	gcwq_release_assoc_and_unlock(gcwq);
+	return 0;
+}
+
+int __cpuinit workqueue_offline_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-		flush_work(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	flush_work(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -3837,9 +3818,6 @@ static int __init init_workqueues(void)
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
 		     WORK_CPU_LAST);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	/* initialize gcwqs */
 	for_each_gcwq_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 23/40] cpufreq: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (21 preceding siblings ...)
  2013-01-31 12:11 ` [patch 22/40] workqueue: Convert to state machine callbacks Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 24/40] arm64: Convert generic timers " Thomas Gleixner
                   ` (18 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: drivers-cpufreq-convert.patch --]
[-- Type: text/plain, Size: 4552 bytes --]

Straightforward conversion to state machine callbacks, without fixing
the obvious brokenness of the asymmetric state invocations.

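To spell out the asymmetry: CPUHP_CPUFREQ_ONLINE pairs
cpufreq_update_policy() as startup with cpufreq_stats_free_sysfs() as
teardown, i.e. the teardown does not undo what the startup did. A
symmetric pair would look like the sketch below;
cpufreq_stats_create_sysfs() does not exist and is named purely to
illustrate the shape:

	/* Hypothetical symmetric pairing - NOT what this patch
	 * installs, since the patch preserves the old notifier
	 * behaviour: */
	cpuhp_setup_state(CPUHP_CPUFREQ_ONLINE,
			  cpufreq_stats_create_sysfs,	/* hypothetical */
			  cpufreq_stats_free_sysfs);
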
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 drivers/cpufreq/cpufreq_stats.c |   55 +++++++++-------------------------------
 include/linux/cpuhotplug.h      |    2 +
 2 files changed, 15 insertions(+), 42 deletions(-)

Index: linux-2.6/drivers/cpufreq/cpufreq_stats.c
===================================================================
--- linux-2.6.orig/drivers/cpufreq/cpufreq_stats.c
+++ linux-2.6/drivers/cpufreq/cpufreq_stats.c
@@ -167,7 +167,7 @@ static int freq_table_get_index(struct c
 /* should be called late in the CPU removal sequence so that the stats
  * memory is still available in case someone tries to use it.
  */
-static void cpufreq_stats_free_table(unsigned int cpu)
+static int cpufreq_stats_free_table(unsigned int cpu)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat) {
@@ -175,18 +175,20 @@ static void cpufreq_stats_free_table(uns
 		kfree(stat);
 	}
 	per_cpu(cpufreq_stats_table, cpu) = NULL;
+	return 0;
 }
 
 /* must be called early in the CPU removal sequence (before
  * cpufreq_remove_dev) so that policy is still valid.
  */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+static int cpufreq_stats_free_sysfs(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	if (policy && policy->cpu == cpu)
 		sysfs_remove_group(&policy->kobj, &stats_attr_group);
 	if (policy)
 		cpufreq_cpu_put(policy);
+	return 0;
 }
 
 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
@@ -316,35 +318,6 @@ static int cpufreq_stat_notifier_trans(s
 	return 0;
 }
 
-static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		cpufreq_update_policy(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		cpufreq_stats_free_sysfs(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		cpufreq_stats_free_table(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
-	.notifier_call = cpufreq_stat_cpu_callback,
-	.priority = 1,
-};
-
 static struct notifier_block notifier_policy_block = {
 	.notifier_call = cpufreq_stat_notifier_policy
 };
@@ -364,18 +337,19 @@ static int __init cpufreq_stats_init(voi
 	if (ret)
 		return ret;
 
-	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	for_each_online_cpu(cpu)
-		cpufreq_update_policy(cpu);
+	/* Install callbacks. Core will call them for each online cpu */
+	cpuhp_setup_state(CPUHP_CPUFREQ_DEAD, NULL, cpufreq_stats_free_table);
+	/* CHECKME: This is pretty broken versus failures in up/down! */
+	cpuhp_setup_state(CPUHP_CPUFREQ_ONLINE, cpufreq_update_policy,
+			  cpufreq_stats_free_sysfs);
 
 	ret = cpufreq_register_notifier(&notifier_trans_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
 	if (ret) {
 		cpufreq_unregister_notifier(&notifier_policy_block,
 				CPUFREQ_POLICY_NOTIFIER);
-		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-		for_each_online_cpu(cpu)
-			cpufreq_stats_free_table(cpu);
+		cpuhp_uninstall_callbacks(cpufreq_stats_cbs,
+					  ARRAY_SIZE(cpufreq_stats_cbs));
 		return ret;
 	}
 
@@ -389,11 +363,8 @@ static void __exit cpufreq_stats_exit(vo
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	for_each_online_cpu(cpu) {
-		cpufreq_stats_free_table(cpu);
-		cpufreq_stats_free_sysfs(cpu);
-	}
+	cpuhp_uninstall_callbacks(cpufreq_stats_cbs,
+				  ARRAY_SIZE(cpufreq_stats_cbs));
 }
 
 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -14,6 +14,7 @@ enum cpuhp_states {
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
+	CPUHP_CPUFREQ_DEAD,
 	CPUHP_SCHED_DEAD,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_OFFLINE,
@@ -31,6 +32,7 @@ enum cpuhp_states {
 	CPUHP_PERF_ONLINE,
 	CPUHP_SCHED_MIGRATE_ONLINE,
 	CPUHP_WORKQUEUE_ONLINE,
+	CPUHP_CPUFREQ_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 24/40] arm64: Convert generic timers to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (22 preceding siblings ...)
  2013-01-31 12:11 ` [patch 23/40] cpufreq: Convert to hotplug state machine Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 25/40] arm: Convert VFP hotplug notifiers to " Thomas Gleixner
                   ` (17 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: arm64-generic-timer-convert.patch --]
[-- Type: text/plain, Size: 2991 bytes --]

Straightforward replacement.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 drivers/clocksource/arm_generic.c |   42 ++++++++++++-----------------------------
 include/linux/cpuhotplug.h        |    1 
 2 files changed, 13 insertions(+), 30 deletions(-)

Index: linux-2.6/drivers/clocksource/arm_generic.c
===================================================================
--- linux-2.6.orig/drivers/clocksource/arm_generic.c
+++ linux-2.6/drivers/clocksource/arm_generic.c
@@ -91,8 +91,10 @@ static int arch_timer_set_next_event(uns
 	return 0;
 }
 
-static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
+static int __cpuinit arch_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
+
 	/* Let's make sure the timer is off before doing anything else */
 	arch_timer_stop();
 
@@ -157,34 +159,17 @@ unsigned long long notrace sched_clock(v
 	return arch_counter_get_cntvct() * sched_clock_mult;
 }
 
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
+
+static int __cpuinit arch_timer_dying_cpu(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
 	struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
 
-	switch(action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		arch_timer_setup(clk);
-		break;
-
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
-			 clk->irq, cpu);
-		disable_percpu_irq(clk->irq);
-		arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
-		break;
-	}
-
-	return NOTIFY_OK;
+	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", clk->irq, cpu);
+	disable_percpu_irq(clk->irq);
+	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	return 0;
 }
 
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 static const struct of_device_id arch_timer_of_match[] __initconst = {
 	{ .compatible = "arm,armv8-timer" },
 	{},
@@ -223,10 +208,7 @@ int __init arm_generic_timer_init(void)
 	/* Calibrate the delay loop directly */
 	lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
 
-	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
-
-	register_cpu_notifier(&arch_timer_cpu_nb);
+	/* Register and immediately configure the timer on the boot CPU */
+	return cpuhp_setup_state(CPUHP_AP_ARM64_TIMER_STARTING,
+				 arch_timer_starting_cpu, arch_timer_dying_cpu);
-
-	return 0;
 }
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -22,6 +22,7 @@ enum cpuhp_states {
 	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
+	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 25/40] arm: Convert VFP hotplug notifiers to state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (23 preceding siblings ...)
  2013-01-31 12:11 ` [patch 24/40] arm64: Convert generic timers " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 26/40] arm: perf: Convert to hotplug " Thomas Gleixner
                   ` (16 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: arm-vfp-convert.patch --]
[-- Type: text/plain, Size: 2855 bytes --]

Straightforward conversion, plus commentary on why code which is
executed in the hotplug callbacks needs to be invoked before
installing them.

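The ordering requirement as a sketch, with the names from the diff
below: the _nocalls variant installs the callbacks without invoking
them for CPUs which are already online, so the work the STARTING
callback would have done must happen by hand first.

	/* 1) Do on all online CPUs what vfp_starting_cpu() would do. */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/* 2) Only then install the callbacks, which from now on run
	 *    for CPUs coming online or going down. */
	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
				  vfp_starting_cpu, vfp_dying_cpu);
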
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/vfp/vfpmodule.c   |   29 +++++++++++++++++------------
 include/linux/cpuhotplug.h |    1 +
 2 files changed, 18 insertions(+), 12 deletions(-)

Index: linux-2.6/arch/arm/vfp/vfpmodule.c
===================================================================
--- linux-2.6.orig/arch/arm/vfp/vfpmodule.c
+++ linux-2.6/arch/arm/vfp/vfpmodule.c
@@ -633,19 +633,19 @@ int vfp_restore_user_hwstate(struct user
  * hardware state at every thread switch.  We clear our held state when
  * a CPU has been killed, indicating that the VFP hardware doesn't contain
  * a threads VFP state.  When a CPU starts up, we re-enable access to the
- * VFP hardware.
- *
- * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * VFP hardware. The callbacks below are called on the CPU which
  * is being offlined/onlined.
  */
-static int vfp_hotplug(struct notifier_block *b, unsigned long action,
-	void *hcpu)
+static int __cpuinit vfp_dying_cpu(unsigned int cpu)
 {
-	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-		vfp_force_reload((long)hcpu, current_thread_info());
-	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		vfp_enable(NULL);
-	return NOTIFY_OK;
+	vfp_force_reload(cpu, current_thread_info());
+	return 0;
+}
+
+static int __cpuinit vfp_starting_cpu(unsigned int unused)
+{
+	vfp_enable(NULL);
+	return 0;
 }
 
 /*
@@ -653,9 +653,13 @@ static int vfp_hotplug(struct notifier_b
  */
 static int __init vfp_init(void)
 {
-	unsigned int vfpsid;
 	unsigned int cpu_arch = cpu_architecture();
+	unsigned int vfpsid;
 
+	/*
+	 * Enable the access to the VFP on all online cpus so the
+	 * following test on FPSID will succeed.
+	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6)
 		on_each_cpu(vfp_enable, NULL, 1);
 
@@ -676,7 +680,8 @@ static int __init vfp_init(void)
 	else if (vfpsid & FPSID_NODOUBLE) {
 		pr_cont("no double precision support\n");
 	} else {
-		hotcpu_notifier(vfp_hotplug, 0);
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
+					  vfp_starting_cpu, vfp_dying_cpu);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
 		pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -22,6 +22,7 @@ enum cpuhp_states {
 	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
+	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 27/40] virt: Convert kvm hotplug to state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (25 preceding siblings ...)
  2013-01-31 12:11 ` [patch 26/40] arm: perf: Convert to hotplug " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 28/40] cpuhotplug: Remove CPU_STARTING notifier Thomas Gleixner
                   ` (14 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: virt-kvm-convert.patch --]
[-- Type: text/plain, Size: 3035 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    1 +
 virt/kvm/kvm_main.c        |   42 ++++++++++++++++--------------------------
 2 files changed, 17 insertions(+), 26 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -25,6 +25,7 @@ enum cpuhp_states {
 	CPUHP_AP_PERF_ARM_STARTING,
 	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_TIMER_STARTING,
+	CPUHP_AP_KVM_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,
Index: linux-2.6/virt/kvm/kvm_main.c
===================================================================
--- linux-2.6.orig/virt/kvm/kvm_main.c
+++ linux-2.6/virt/kvm/kvm_main.c
@@ -2496,30 +2496,23 @@ static int hardware_enable_all(void)
 	return r;
 }
 
-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
-			   void *v)
+static int kvm_starting_cpu(unsigned int cpu)
 {
-	int cpu = (long)v;
-
-	if (!kvm_usage_count)
-		return NOTIFY_OK;
-
-	val &= ~CPU_TASKS_FROZEN;
-	switch (val) {
-	case CPU_DYING:
-		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
-		       cpu);
-		hardware_disable(NULL);
-		break;
-	case CPU_STARTING:
-		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
-		       cpu);
+	if (kvm_usage_count) {
+		pr_info("kvm: enabling virtualization on CPU%u\n", cpu);
 		hardware_enable(NULL);
-		break;
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
+static int kvm_dying_cpu(unsigned int cpu)
+{
+	if (kvm_usage_count) {
+		pr_info("kvm: disabling virtualization on CPU%u\n", cpu);
+		hardware_disable(NULL);
+	}
+	return 0;
+}
 
 asmlinkage void kvm_spurious_fault(void)
 {
@@ -2725,10 +2718,6 @@ int kvm_io_bus_unregister_dev(struct kvm
 	return r;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-	.notifier_call = kvm_cpu_hotplug,
-};
-
 static int vm_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
@@ -2870,7 +2859,8 @@ int kvm_init(void *opaque, unsigned vcpu
 			goto out_free_1;
 	}
 
-	r = register_cpu_notifier(&kvm_cpu_notifier);
+	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, kvm_starting_cpu,
+				      kvm_dying_cpu);
 	if (r)
 		goto out_free_2;
 	register_reboot_notifier(&kvm_reboot_notifier);
@@ -2920,7 +2910,7 @@ out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 out_free_2:
 out_free_1:
 	kvm_arch_hardware_unsetup();
@@ -2941,7 +2931,7 @@ void kvm_exit(void)
 	kvm_async_pf_deinit();
 	unregister_syscore_ops(&kvm_syscore_ops);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 26/40] arm: perf: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (24 preceding siblings ...)
  2013-01-31 12:11 ` [patch 25/40] arm: Convert VFP hotplug notifiers to " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 27/40] virt: Convert kvm hotplug to " Thomas Gleixner
                   ` (15 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: arm-perf-convert.patch --]
[-- Type: text/plain, Size: 2232 bytes --]

Straightforward conversion without bells and whistles.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/kernel/perf_event_cpu.c |   28 +++++-----------------------
 include/linux/cpuhotplug.h       |    1 +
 2 files changed, 6 insertions(+), 23 deletions(-)

Index: linux-2.6/arch/arm/kernel/perf_event_cpu.c
===================================================================
--- linux-2.6.orig/arch/arm/kernel/perf_event_cpu.c
+++ linux-2.6/arch/arm/kernel/perf_event_cpu.c
@@ -157,24 +157,13 @@ static void cpu_pmu_init(struct arm_pmu 
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
+static int __cpuinit arm_perf_starting_cpu(unsigned int cpu)
 {
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
 	if (cpu_pmu && cpu_pmu->reset)
 		cpu_pmu->reset(cpu_pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
-	.notifier_call = cpu_pmu_notify,
-};
-
 /*
  * PMU platform driver and devicetree bindings.
  */
@@ -304,16 +293,9 @@ static struct platform_driver cpu_pmu_dr
 
 static int __init register_pmu_driver(void)
 {
-	int err;
-
-	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
-	if (err)
-		return err;
-
-	err = platform_driver_register(&cpu_pmu_driver);
-	if (err)
-		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+	int err = platform_driver_register(&cpu_pmu_driver);
 
-	return err;
+	return err ? err : cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+						     arm_perf_starting_cpu, NULL);
 }
 device_initcall(register_pmu_driver);
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -22,6 +22,7 @@ enum cpuhp_states {
 	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
+	CPUHP_AP_PERF_ARM_STARTING,
 	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 28/40] cpuhotplug: Remove CPU_STARTING notifier
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (26 preceding siblings ...)
  2013-01-31 12:11 ` [patch 27/40] virt: Convert kvm hotplug to " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 29/40] s390: Convert vtime to hotplug state machine Thomas Gleixner
                   ` (13 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-remove-cpu-starting-notifier.patch --]
[-- Type: text/plain, Size: 2881 bytes --]

All users converted to state machine.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    6 ------
 include/linux/cpuhotplug.h |    1 -
 kernel/cpu.c               |   13 +------------
 3 files changed, 1 insertion(+), 19 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -67,10 +67,6 @@ extern ssize_t arch_print_cpu_modalias(s
 					* sleep, must not fail */
 #define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
 					* lock is dropped */
-#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
-					* Called on the new cpu, just before
-					* enabling interrupts. Must not sleep,
-					* must not fail */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
  * operation in progress
@@ -84,8 +80,6 @@ extern ssize_t arch_print_cpu_modalias(s
 #define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
 #define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
-#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)
-
 
 #ifdef CONFIG_SMP
 extern bool cpuhp_tasks_frozen;
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -26,7 +26,6 @@ enum cpuhp_states {
 	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
-	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,
 	CPUHP_AP_MAX,
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -216,12 +216,6 @@ static int bringup_cpu(unsigned int cpu)
 	return 0;
 }
 
-static int notify_starting(unsigned int cpu)
-{
-	cpu_notify(CPU_STARTING, cpu);
-	return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 EXPORT_SYMBOL(register_cpu_notifier);
 
@@ -446,10 +440,9 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /**
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  * @cpu: cpu that just started
  *
- * This function calls the cpu_chain notifiers with CPU_STARTING.
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
@@ -816,10 +809,6 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = sched_starting_cpu,
 		.teardown = NULL,
 	},
-	[CPUHP_AP_NOTIFY_STARTING] = {
-		.startup = notify_starting,
-		.teardown = NULL,
-	},
 	[CPUHP_AP_NOTIFY_DYING] = {
 		.startup = NULL,
 		.teardown = notify_dying,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 29/40] s390: Convert vtime to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (27 preceding siblings ...)
  2013-01-31 12:11 ` [patch 28/40] cpuhotplug: Remove CPU_STARTING notifier Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 30/40] x86: tboot: Convert " Thomas Gleixner
                   ` (12 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: s390-vtime-convert.patch --]
[-- Type: text/plain, Size: 1630 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/s390/kernel/vtime.c   |   18 +++++-------------
 include/linux/cpuhotplug.h |    1 +
 2 files changed, 6 insertions(+), 13 deletions(-)

Index: linux-2.6/arch/s390/kernel/vtime.c
===================================================================
--- linux-2.6.orig/arch/s390/kernel/vtime.c
+++ linux-2.6/arch/s390/kernel/vtime.c
@@ -382,25 +382,17 @@ void __cpuinit init_cpu_vtimer(void)
 	set_vtimer(VTIMER_MAX_SLICE);
 }
 
-static int __cpuinit s390_nohz_notify(struct notifier_block *self,
-				      unsigned long action, void *hcpu)
+static int __cpuinit s390_vtime_dying_cpu(unsigned int cpu)
 {
-	struct s390_idle_data *idle;
-	long cpu = (long) hcpu;
+	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
 
-	idle = &per_cpu(s390_idle, cpu);
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DYING:
-		idle->nohz_delay = 0;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	idle->nohz_delay = 0;
+	return 0;
 }
 
 void __init vtime_init(void)
 {
 	/* Enable cpu timer interrupts on the boot cpu. */
 	init_cpu_vtimer();
-	cpu_notifier(s390_nohz_notify, 0);
+	cpuhp_setup_state(CPUHP_AP_S390_VTIME_DYING, NULL, s390_vtime_dying_cpu);
 }
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -27,6 +27,7 @@ enum cpuhp_states {
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_S390_VTIME_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,
 	CPUHP_AP_MAX,
 	CPUHP_TEARDOWN_CPU,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 30/40] x86: tboot: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (28 preceding siblings ...)
  2013-01-31 12:11 ` [patch 29/40] s390: Convert vtime to hotplug state machine Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 31/40] sched: Convert fair nohz balancer " Thomas Gleixner
                   ` (11 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: x86-tboot-convert.patch --]
[-- Type: text/plain, Size: 1968 bytes --]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/tboot.c    |   23 +++++++----------------
 include/linux/cpuhotplug.h |    1 +
 2 files changed, 8 insertions(+), 16 deletions(-)

Index: linux-2.6/arch/x86/kernel/tboot.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/tboot.c
+++ linux-2.6/arch/x86/kernel/tboot.c
@@ -319,25 +319,16 @@ static int tboot_wait_for_aps(int num_ap
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }
 
-static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
-			unsigned long action, void *hcpu)
+static int __cpuinit tboot_dying_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_DYING:
-		atomic_inc(&ap_wfs_count);
-		if (num_online_cpus() == 1)
-			if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
-				return NOTIFY_BAD;
-		break;
+	atomic_inc(&ap_wfs_count);
+	if (num_online_cpus() == 1) {
+		if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+			return -EBUSY;
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
-{
-	.notifier_call = tboot_cpu_callback,
-};
-
 static __init int tboot_late_init(void)
 {
 	if (!tboot_enabled())
@@ -346,7 +337,7 @@ static __init int tboot_late_init(void)
 	tboot_create_trampoline();
 
 	atomic_set(&ap_wfs_count, 0);
-	register_hotcpu_notifier(&tboot_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, NULL, tboot_dying_cpu);
 
 	acpi_os_set_prepare_sleep(&tboot_sleep);
 	return 0;
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -27,6 +27,7 @@ enum cpuhp_states {
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_S390_VTIME_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,
 	CPUHP_AP_MAX,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 31/40] sched: Convert fair nohz balancer to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (29 preceding siblings ...)
  2013-01-31 12:11 ` [patch 30/40] x86: tboot: Convert " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 33/40] hrtimer: Convert " Thomas Gleixner
                   ` (10 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-sched-convert-fair-nohz-balancer.patch --]
[-- Type: text/plain, Size: 2984 bytes --]

Straightforward conversion, which leaves open the question of whether
this could be combined with already existing scheduler infrastructure
instead of adding an extra state.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    6 ++++++
 kernel/cpu.c               |    4 ++++
 kernel/sched/fair.c        |   16 ++--------------
 3 files changed, 12 insertions(+), 14 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -29,6 +29,7 @@ enum cpuhp_states {
 	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_S390_VTIME_DYING,
+	CPUHP_AP_SCHED_NOHZ_DYING,
 	CPUHP_AP_SCHED_MIGRATE_DYING,
 	CPUHP_AP_MAX,
 	CPUHP_TEARDOWN_CPU,
@@ -126,6 +127,11 @@ int sched_migration_dead_cpu(unsigned in
 #define sched_migration_dying_cpu	NULL
 #define sched_migration_dead_cpu	NULL
 #endif
+#if defined(CONFIG_NO_HZ)
+int nohz_balance_exit_idle(unsigned int cpu);
+#else
+#define nohz_balance_exit_idle		NULL
+#endif
 
  /* Performance counter hotplug functions */
 #ifdef CONFIG_PERF_EVENTS
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -813,6 +813,10 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = NULL,
 		.teardown = notify_dying,
 	},
+	[CPUHP_AP_SCHED_NOHZ_DYING] = {
+		.startup = NULL,
+		.teardown = nohz_balance_exit_idle,
+	},
 	[CPUHP_AP_SCHED_MIGRATE_DYING] = {
 		.startup = NULL,
 		.teardown = sched_migration_dying_cpu,
Index: linux-2.6/kernel/sched/fair.c
===================================================================
--- linux-2.6.orig/kernel/sched/fair.c
+++ linux-2.6/kernel/sched/fair.c
@@ -5390,13 +5390,14 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
-static inline void nohz_balance_exit_idle(int cpu)
+int nohz_balance_exit_idle(unsigned int cpu)
 {
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
 		atomic_dec(&nohz.nr_cpus);
 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
+	return 0;
 }
 
 static inline void set_cpu_sd_state_busy(void)
@@ -5448,18 +5449,6 @@ void nohz_balance_enter_idle(int cpu)
 	atomic_inc(&nohz.nr_cpus);
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
-
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DYING:
-		nohz_balance_exit_idle(smp_processor_id());
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -6167,7 +6156,6 @@ __init void init_sched_fair_class(void)
 #ifdef CONFIG_NO_HZ
 	nohz.next_balance = jiffies;
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 33/40] hrtimer: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (30 preceding siblings ...)
  2013-01-31 12:11 ` [patch 31/40] sched: Convert fair nohz balancer " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 32/40] rcu: Convert rcutree " Thomas Gleixner
                   ` (9 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: hrtimer-convert-to-statemachine.patch --]
[-- Type: text/plain, Size: 5514 bytes --]

Split out the clockevents callbacks instead of piggybacking them on
hrtimers.

This gets rid of a POST_DEAD user; see commit 54e88fad. The callbacks
are simply moved to their proper places in the state machine.

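A summary of the resulting split, condensed from the kernel/cpu.c hunk
below. On the down path teardown callbacks run from the highest state
to the lowest: clockevents_dying_cpu() on the outgoing CPU, then
clockevents_dead_cpu() and hrtimers_dead_cpu() from the control CPU
once the CPU is gone, which is why the POST_DEAD stage can go away:

	[CPUHP_HRTIMERS_PREPARE] = {
		.startup = hrtimers_prepare_cpu,
		.teardown = hrtimers_dead_cpu,	/* migrate hrtimers away */
	},
	[CPUHP_CLOCKEVENTS_DEAD] = {
		.startup = NULL,
		.teardown = clockevents_dead_cpu,
	},
	[CPUHP_AP_CLOCKEVENTS_DYING] = {
		.startup = NULL,
		.teardown = clockevents_dying_cpu, /* on the dying CPU */
	},
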
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |   18 +++++++++++++++++
 kernel/cpu.c               |   12 +++++++++++
 kernel/hrtimer.c           |   47 ++++-----------------------------------------
 kernel/time/clockevents.c  |   13 ++++++++++++
 4 files changed, 48 insertions(+), 42 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -13,8 +13,10 @@ enum cpuhp_states {
 	CPUHP_SCHED_MIGRATE_PREP,
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_RCUTREE_PREPARE,
+	CPUHP_HRTIMERS_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
+	CPUHP_CLOCKEVENTS_DEAD,
 	CPUHP_CPUFREQ_DEAD,
 	CPUHP_SCHED_DEAD,
 	CPUHP_BRINGUP_CPU,
@@ -28,6 +30,7 @@ enum cpuhp_states {
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_CLOCKEVENTS_DYING,
 	CPUHP_AP_RCUTREE_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_S390_VTIME_DYING,
@@ -165,4 +168,19 @@ int rcutree_dying_cpu(unsigned int cpu);
 #define rcutree_dying_cpu	NULL
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+int clockevents_dying_cpu(unsigned int cpu);
+int clockevents_dead_cpu(unsigned int cpu);
+#else
+#define clockevents_dying_cpu	NULL
+#define clockevents_dead_cpu	NULL
+#endif
+
+int hrtimers_prepare_cpu(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int hrtimers_dead_cpu(unsigned int cpu);
+#else
+#define hrtimers_dead_cpu	NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -759,6 +759,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = rcutree_prepare_cpu,
 		.teardown = rcutree_dead_cpu,
 	},
+	[CPUHP_HRTIMERS_PREPARE] = {
+		.startup = hrtimers_prepare_cpu,
+		.teardown = hrtimers_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -767,6 +771,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = NULL,
 		.teardown = notify_dead,
 	},
+	[CPUHP_CLOCKEVENTS_DEAD] = {
+		.startup = NULL,
+		.teardown = clockevents_dead_cpu,
+	},
 	[CPUHP_BRINGUP_CPU] = {
 		.startup = bringup_cpu,
 		.teardown = NULL,
@@ -821,6 +829,10 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = NULL,
 		.teardown = notify_dying,
 	},
+	[CPUHP_AP_CLOCKEVENTS_DYING] = {
+		.startup = NULL,
+		.teardown = clockevents_dying_cpu,
+	},
 	[CPUHP_AP_RCUTREE_DYING] = {
 		.startup = NULL,
 		.teardown = rcutree_dying_cpu,
Index: linux-2.6/kernel/hrtimer.c
===================================================================
--- linux-2.6.orig/kernel/hrtimer.c
+++ linux-2.6/kernel/hrtimer.c
@@ -1635,7 +1635,7 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
 /*
  * Functions related to boot-time initialization:
  */
-static void __cpuinit init_hrtimers_cpu(int cpu)
+int __cpuinit hrtimers_prepare_cpu(unsigned int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1648,6 +1648,7 @@ static void __cpuinit init_hrtimers_cpu(
 	}
 
 	hrtimer_init_hres(cpu_base);
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1685,7 +1686,7 @@ static void migrate_hrtimer_list(struct 
 	}
 }
 
-static void migrate_hrtimers(int scpu)
+int __cpuinit hrtimers_dead_cpu(unsigned int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
 	int i;
@@ -1714,52 +1715,14 @@ static void migrate_hrtimers(int scpu)
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_enable();
+	return 0;
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
-{
-	int scpu = (long)hcpu;
-
-	switch (action) {
-
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(scpu);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	{
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
-		migrate_hrtimers(scpu);
-		break;
-	}
-#endif
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata hrtimers_nb = {
-	.notifier_call = hrtimer_cpu_notify,
-};
-
 void __init hrtimers_init(void)
 {
-	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
-			  (void *)(long)smp_processor_id());
-	register_cpu_notifier(&hrtimers_nb);
+	hrtimers_prepare_cpu(smp_processor_id());
 #ifdef CONFIG_HIGH_RES_TIMERS
 	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
 #endif
Index: linux-2.6/kernel/time/clockevents.c
===================================================================
--- linux-2.6.orig/kernel/time/clockevents.c
+++ linux-2.6/kernel/time/clockevents.c
@@ -461,4 +461,17 @@ void clockevents_notify(unsigned long re
 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
+
+int __cpuinit clockevents_dying_cpu(unsigned int cpu)
+{
+	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &cpu);
+	return 0;
+}
+
+int __cpuinit clockevents_dead_cpu(unsigned int cpu)
+{
+	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
+	return 0;
+}
+
 #endif



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 32/40] rcu: Convert rcutree to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (31 preceding siblings ...)
  2013-01-31 12:11 ` [patch 33/40] hrtimer: Convert " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-02-12  0:01   ` Paul E. McKenney
  2013-01-31 12:11 ` [patch 34/40] cpuhotplug: Remove CPU_DYING notifier Thomas Gleixner
                   ` (8 subsequent siblings)
  41 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhtoplug-convert-rcutree-to-state-machine.patch --]
[-- Type: text/plain, Size: 6188 bytes --]

Do we really need so many states here?

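For the record, the states this ends up with, condensed from the hunks
below; the _dying callback runs on the outgoing CPU with the machine
stopped, the _dead callback on the control CPU after the CPU is gone:

	[CPUHP_RCUTREE_PREPARE] = {
		.startup = rcutree_prepare_cpu,	/* per cpu data, kthreads */
		.teardown = rcutree_dead_cpu,	/* reap dead CPU's callbacks */
	},
	[CPUHP_RCUTREE_ONLINE] = {
		.startup = rcutree_online_cpu,	/* boost kthread affinity */
		.teardown = rcutree_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.startup = NULL,
		.teardown = rcutree_dying_cpu,	/* hand off callbacks */
	},
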
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |   18 ++++++++
 kernel/cpu.c               |   12 +++++
 kernel/rcutree.c           |   95 ++++++++++++++++++++-------------------------
 3 files changed, 73 insertions(+), 52 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -12,6 +12,7 @@ enum cpuhp_states {
 	CPUHP_PERF_PREPARE,
 	CPUHP_SCHED_MIGRATE_PREP,
 	CPUHP_WORKQUEUE_PREP,
+	CPUHP_RCUTREE_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CPUFREQ_DEAD,
@@ -27,6 +28,7 @@ enum cpuhp_states {
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
 	CPUHP_AP_NOTIFY_DYING,
+	CPUHP_AP_RCUTREE_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_S390_VTIME_DYING,
 	CPUHP_AP_SCHED_NOHZ_DYING,
@@ -39,6 +41,7 @@ enum cpuhp_states {
 	CPUHP_SCHED_MIGRATE_ONLINE,
 	CPUHP_WORKQUEUE_ONLINE,
 	CPUHP_CPUFREQ_ONLINE,
+	CPUHP_RCUTREE_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
@@ -147,4 +150,19 @@ int workqueue_prepare_cpu(unsigned int c
 int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 
+/* RCUtree hotplug events */
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+#else
+#define rcutree_prepare_cpu	NULL
+#define rcutree_online_cpu	NULL
+#define rcutree_offline_cpu	NULL
+#define rcutree_dead_cpu	NULL
+#define rcutree_dying_cpu	NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -755,6 +755,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = workqueue_prepare_cpu,
 		.teardown = NULL,
 	},
+	[CPUHP_RCUTREE_PREPARE] = {
+		.startup = rcutree_prepare_cpu,
+		.teardown = rcutree_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -787,6 +791,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = workqueue_online_cpu,
 		.teardown = workqueue_offline_cpu,
 	},
+	[CPUHP_RCUTREE_ONLINE] = {
+		.startup = rcutree_online_cpu,
+		.teardown = rcutree_offline_cpu,
+	},
 	[CPUHP_NOTIFY_ONLINE] = {
 		.startup = notify_online,
 		.teardown = NULL,
@@ -813,6 +821,10 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = NULL,
 		.teardown = notify_dying,
 	},
+	[CPUHP_AP_RCUTREE_DYING] = {
+		.startup = NULL,
+		.teardown = rcutree_dying_cpu,
+	},
 	[CPUHP_AP_SCHED_NOHZ_DYING] = {
 		.startup = NULL,
 		.teardown = nohz_balance_exit_idle,
Index: linux-2.6/kernel/rcutree.c
===================================================================
--- linux-2.6.orig/kernel/rcutree.c
+++ linux-2.6/kernel/rcutree.c
@@ -2787,67 +2787,59 @@ rcu_init_percpu_data(int cpu, struct rcu
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+int __cpuinit rcutree_prepare_cpu(unsigned int cpu)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
 		rcu_init_percpu_data(cpu, rsp,
 				     strcmp(rsp->name, "rcu_preempt") == 0);
+	rcu_prepare_kthreads(cpu);
+	return 0;
 }
 
-/*
- * Handle CPU online/offline notification events.
- */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+int __cpuinit rcutree_dead_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
 	struct rcu_state *rsp;
-	int ret = NOTIFY_OK;
 
-	trace_rcu_utilization("Start CPU hotplug");
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_prepare_cpu(cpu);
-		rcu_prepare_kthreads(cpu);
-		break;
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		rcu_boost_kthread_setaffinity(rnp, -1);
-		break;
-	case CPU_DOWN_PREPARE:
-		if (nocb_cpu_expendable(cpu))
-			rcu_boost_kthread_setaffinity(rnp, cpu);
-		else
-			ret = NOTIFY_BAD;
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		/*
-		 * The whole machine is "stopped" except this CPU, so we can
-		 * touch any data without introducing corruption. We send the
-		 * dying CPU's callbacks to an arbitrarily chosen online CPU.
-		 */
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dying_cpu(rsp);
-		rcu_cleanup_after_idle(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dead_cpu(cpu, rsp);
-		break;
-	default:
-		break;
-	}
-	trace_rcu_utilization("End CPU hotplug");
-	return ret;
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dead_cpu(cpu, rsp);
+	return 0;
+}
+
+static void __cpuinit rcutree_affinity_setting(unsigned int cpu, int outgoing)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+
+	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+}
+
+int __cpuinit rcutree_online_cpu(unsigned int cpu)
+{
+	rcutree_affinity_setting(cpu, -1);
+	return 0;
+}
+
+int __cpuinit rcutree_offline_cpu(unsigned int cpu)
+{
+	if (!nocb_cpu_expendable(cpu))
+		return -EINVAL;
+	rcutree_affinity_setting(cpu, cpu);
+	return 0;
+}
+
+int __cpuinit rcutree_dying_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+	/*
+	 * The whole machine is "stopped" except this CPU, so we can
+	 * touch any data without introducing corruption. We send the
+	 * dying CPU's callbacks to an arbitrarily chosen online CPU.
+	 */
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_cpu(rsp);
+	rcu_cleanup_after_idle(cpu);
+	return 0;
 }
 
 /*
@@ -3071,9 +3063,8 @@ void __init rcu_init(void)
 	 * this is called early in boot, before either interrupts
 	 * or the scheduler are operational.
 	 */
-	cpu_notifier(rcu_cpu_notify, 0);
 	for_each_online_cpu(cpu)
-		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+		rcutree_prepare_cpu(cpu);
 	check_cpu_stall_init();
 }
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 34/40] cpuhotplug: Remove CPU_DYING notifier
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (32 preceding siblings ...)
  2013-01-31 12:11 ` [patch 32/40] rcu: Convert rcutree " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 35/40] timers: Convert to hotplug state machine Thomas Gleixner
                   ` (7 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

[-- Attachment #1: cpuhotplug-remove-cpu-dying.patch --]
[-- Type: text/plain, Size: 2707 bytes --]

All users gone.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpu.h        |    6 ------
 include/linux/cpuhotplug.h |    1 -
 kernel/cpu.c               |   11 -----------
 3 files changed, 18 deletions(-)

Index: linux-2.6/include/linux/cpu.h
===================================================================
--- linux-2.6.orig/include/linux/cpu.h
+++ linux-2.6/include/linux/cpu.h
@@ -60,11 +60,6 @@ extern ssize_t arch_print_cpu_modalias(s
 #define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
-					* not handling interrupts, soon dead.
-					* Called on the dying cpu, interrupts
-					* are already disabled. Must not
-					* sleep, must not fail */
 #define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
 					* lock is dropped */
 
@@ -79,7 +74,6 @@ extern ssize_t arch_print_cpu_modalias(s
 #define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
 #define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
 
 #ifdef CONFIG_SMP
 extern bool cpuhp_tasks_frozen;
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -29,7 +29,6 @@ enum cpuhp_states {
 	CPUHP_AP_ARM_VFP_STARTING,
 	CPUHP_AP_ARM64_TIMER_STARTING,
 	CPUHP_AP_KVM_STARTING,
-	CPUHP_AP_NOTIFY_DYING,
 	CPUHP_AP_CLOCKEVENTS_DYING,
 	CPUHP_AP_RCUTREE_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -303,12 +303,6 @@ static int notify_down_prepare(unsigned 
 	return err;
 }
 
-static int notify_dying(unsigned int cpu)
-{
-	cpu_notify(CPU_DYING, cpu);
-	return 0;
-}
-
 /* Take this CPU down. */
 static int __ref take_cpu_down(void *_param)
 {
@@ -366,7 +360,6 @@ static int notify_dead(unsigned int cpu)
 #define notify_down_prepare	NULL
 #define takedown_cpu		NULL
 #define notify_dead		NULL
-#define notify_dying		NULL
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -825,10 +818,6 @@ static struct cpuhp_step cpuhp_ap_states
 		.startup = sched_starting_cpu,
 		.teardown = NULL,
 	},
-	[CPUHP_AP_NOTIFY_DYING] = {
-		.startup = NULL,
-		.teardown = notify_dying,
-	},
 	[CPUHP_AP_CLOCKEVENTS_DYING] = {
 		.startup = NULL,
 		.teardown = clockevents_dying_cpu,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 35/40] timers: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (33 preceding siblings ...)
  2013-01-31 12:11 ` [patch 34/40] cpuhotplug: Remove CPU_DYING notifier Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 36/40] profile: Convert to " Thomas Gleixner
                   ` (6 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0001-cpu-hotplug-convert-timers.patch --]
[-- Type: text/plain, Size: 3530 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    4 ++++
 kernel/cpu.c               |    4 ++++
 kernel/timer.c             |   43 +++++--------------------------------------
 3 files changed, 13 insertions(+), 38 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -14,6 +14,7 @@ enum cpuhp_states {
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_RCUTREE_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,
+	CPUHP_TIMERS_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,
@@ -176,10 +177,13 @@ int clockevents_dead_cpu(unsigned int cp
 #endif
 
 int hrtimers_prepare_cpu(unsigned int cpu);
+int timers_prepare_cpu(unsigned int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
 int hrtimers_dead_cpu(unsigned int cpu);
+int timers_dead_cpu(unsigned int cpu);
 #else
 #define hrtimers_dead_cpu	NULL
+#define timers_dead_cpu		NULL
 #endif
 
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -756,6 +756,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = hrtimers_prepare_cpu,
 		.teardown = hrtimers_dead_cpu,
 	},
+	[CPUHP_TIMERS_PREPARE] = {
+		.startup = timers_prepare_cpu,
+		.teardown = timers_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
Index: linux-2.6/kernel/timer.c
===================================================================
--- linux-2.6.orig/kernel/timer.c
+++ linux-2.6/kernel/timer.c
@@ -1642,7 +1642,7 @@ SYSCALL_DEFINE1(sysinfo, struct sysinfo 
 	return 0;
 }
 
-static int __cpuinit init_timers_cpu(int cpu)
+int __cpuinit timers_prepare_cpu(unsigned int cpu)
 {
 	int j;
 	struct tvec_base *base;
@@ -1714,7 +1714,7 @@ static void migrate_timer_list(struct tv
 	}
 }
 
-static void __cpuinit migrate_timers(int cpu)
+int __cpuinit timers_dead_cpu(unsigned int cpu)
 {
 	struct tvec_base *old_base;
 	struct tvec_base *new_base;
@@ -1744,52 +1744,19 @@ static void __cpuinit migrate_timers(int
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(tvec_bases);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int __cpuinit timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	int err;
 
-	switch(action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		err = init_timers_cpu(cpu);
-		if (err < 0)
-			return notifier_from_errno(err);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		migrate_timers(cpu);
-		break;
-#endif
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	return 0;
 }
-
-static struct notifier_block __cpuinitdata timers_nb = {
-	.notifier_call	= timer_cpu_notify,
-};
-
+#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init init_timers(void)
 {
-	int err;
-
 	/* ensure there are enough low bits for flags in timer->base pointer */
 	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
 
-	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
-			       (void *)(long)smp_processor_id());
 	init_timer_stats();
+	BUG_ON(timers_prepare_cpu(smp_processor_id()));
 
-	BUG_ON(err != NOTIFY_OK);
-	register_cpu_notifier(&timers_nb);
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 36/40] profile: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (34 preceding siblings ...)
  2013-01-31 12:11 ` [patch 35/40] timers: Convert to hotplug state machine Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 37/40] x86: x2apic: Convert to cpu " Thomas Gleixner
                   ` (5 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0002-cpu-hotplug-convert-profile.c.patch --]
[-- Type: text/plain, Size: 5645 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
---
 include/linux/cpuhotplug.h |   12 +++++
 kernel/cpu.c               |    8 +++
 kernel/profile.c           |   92 +++++++++++++++++++++------------------------
 3 files changed, 63 insertions(+), 49 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -15,6 +15,7 @@ enum cpuhp_states {
 	CPUHP_RCUTREE_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,
 	CPUHP_TIMERS_PREPARE,
+	CPUHP_PROFILE_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,
@@ -46,6 +47,7 @@ enum cpuhp_states {
 	CPUHP_CPUFREQ_ONLINE,
 	CPUHP_RCUTREE_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
+	CPUHP_PROFILE_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
 	CPUHP_PERF_X86_UNCORE_ONLINE,
 	CPUHP_PERF_X86_ONLINE,
@@ -186,4 +188,14 @@ int timers_dead_cpu(unsigned int cpu);
 #define timers_dead_cpu		NULL
 #endif
 
+#if defined(CONFIG_PROFILING) && defined(CONFIG_HOTPLUG_CPU)
+int profile_prepare_cpu(unsigned int cpu);
+int profile_dead_cpu(unsigned int cpu);
+int profile_online_cpu(unsigned int cpu);
+#else
+#define profile_prepare_cpu	NULL
+#define profile_dead_cpu	NULL
+#define profile_online_cpu	NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -760,6 +760,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = timers_prepare_cpu,
 		.teardown = timers_dead_cpu,
 	},
+	[CPUHP_PROFILE_PREPARE] = {
+		.startup = profile_prepare_cpu,
+		.teardown = profile_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -804,6 +808,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = notify_online,
 		.teardown = NULL,
 	},
+	[CPUHP_PROFILE_ONLINE] = {
+		.startup = profile_online_cpu,
+		.teardown = NULL,
+	},
 	[CPUHP_NOTIFY_DOWN_PREPARE] = {
 		.startup = NULL,
 		.teardown = notify_down_prepare,
Index: linux-2.6/kernel/profile.c
===================================================================
--- linux-2.6.orig/kernel/profile.c
+++ linux-2.6/kernel/profile.c
@@ -353,68 +353,63 @@ out:
 	put_cpu();
 }
 
-static int __cpuinit profile_cpu_callback(struct notifier_block *info,
-					unsigned long action, void *__cpu)
+int __cpuinit profile_dead_cpu(unsigned int cpu)
 {
-	int node, cpu = (unsigned long)__cpu;
 	struct page *page;
+	int i;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		node = cpu_to_mem(cpu);
-		per_cpu(cpu_profile_flip, cpu) = 0;
-		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_exact_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				return notifier_from_errno(-ENOMEM);
-			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
-		}
-		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_exact_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				goto out_free;
-			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
-		}
-		break;
-out_free:
-		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-		__free_page(page);
-		return notifier_from_errno(-ENOMEM);
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_set_cpu(cpu, prof_cpu_mask);
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_clear_cpu(cpu, prof_cpu_mask);
-		if (per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
-			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
+	if (prof_cpu_mask != NULL)
+		cpumask_clear_cpu(cpu, prof_cpu_mask);
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i]) {
+			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
+			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
 			__free_page(page);
 		}
-		if (per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-			__free_page(page);
-		}
-		break;
 	}
-	return NOTIFY_OK;
+	return 0;
+}
+
+int __cpuinit profile_prepare_cpu(unsigned int cpu)
+{
+	int i, node = cpu_to_mem(cpu);
+	struct page *page;
+
+	per_cpu(cpu_profile_flip, cpu) = 0;
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i])
+			continue;
+
+		page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+		if (!page) {
+			profile_dead_cpu(cpu);
+			return -ENOMEM;
+		}
+		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
+	}
+	return 0;
 }
+
+int __cpuinit profile_online_cpu(unsigned int cpu)
+{
+	if (prof_cpu_mask != NULL)
+		cpumask_set_cpu(cpu, prof_cpu_mask);
+
+	return 0;
+}
+
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
-#define profile_cpu_callback		NULL
 
 static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
@@ -612,7 +607,7 @@ out_cleanup:
 #define create_hash_tables()			({ 0; })
 #endif
 
-int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
+int __init create_proc_profile(void)
 {
 	struct proc_dir_entry *entry;
 
@@ -625,7 +620,6 @@ int __ref create_proc_profile(void) /* f
 	if (!entry)
 		return 0;
 	entry->size = (1+prof_len) * sizeof(atomic_t);
-	hotcpu_notifier(profile_cpu_callback, 0);
 	return 0;
 }
 module_init(create_proc_profile);



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 37/40] x86: x2apic: Convert to cpu hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (35 preceding siblings ...)
  2013-01-31 12:11 ` [patch 36/40] profile: Convert to " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 38/40] smp: Convert core to " Thomas Gleixner
                   ` (4 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0003-cpu-hotplug-convert-x2apic.patch --]
[-- Type: text/plain, Size: 3755 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/apic/x2apic_cluster.c |   80 ++++++++++++----------------------
 include/linux/cpuhotplug.h            |    1 
 2 files changed, 31 insertions(+), 50 deletions(-)

Index: linux-2.6/arch/x86/kernel/apic/x2apic_cluster.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/apic/x2apic_cluster.c
+++ linux-2.6/arch/x86/kernel/apic/x2apic_cluster.c
@@ -145,68 +145,48 @@ static void init_x2apic_ldr(void)
 	}
 }
 
- /*
-  * At CPU state changes, update the x2apic cluster sibling info.
-  */
-static int __cpuinit
-update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+/*
+ * At CPU state changes, update the x2apic cluster sibling info.
+ */
+int __cpuinit x2apic_prepare_cpu(unsigned int cpu)
 {
-	unsigned int this_cpu = (unsigned long)hcpu;
-	unsigned int cpu;
-	int err = 0;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
-					GFP_KERNEL)) {
-			err = -ENOMEM;
-		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
-					       GFP_KERNEL)) {
-			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-			err = -ENOMEM;
-		}
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-		for_each_online_cpu(cpu) {
-			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
-				continue;
-			__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
-			__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
-		}
-		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
-		break;
+	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
+		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+		return -ENOMEM;
 	}
 
-	return notifier_from_errno(err);
+	return 0;
 }
 
-static struct notifier_block __refdata x2apic_cpu_notifier = {
-	.notifier_call = update_clusterinfo,
-};
-
-static int x2apic_init_cpu_notifier(void)
+int __cpuinit x2apic_dead_cpu(unsigned int this_cpu)
 {
-	int cpu = smp_processor_id();
-
-	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
-	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);
+	int cpu;
 
-	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
-
-	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
-	register_hotcpu_notifier(&x2apic_cpu_notifier);
-	return 1;
+	for_each_online_cpu(cpu) {
+		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
+			continue;
+		__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
+		__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
+	}
+	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
+	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+	return 0;
 }
 
 static int x2apic_cluster_probe(void)
 {
-	if (x2apic_mode)
-		return x2apic_init_cpu_notifier();
-	else
+	int cpu = smp_processor_id();
+
+	if (!x2apic_mode)
 		return 0;
+
+	cpuhp_setup_state(CPUHP_X2APIC_PREPARE, x2apic_prepare_cpu,
+			  x2apic_dead_cpu);
+	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
+	return 1;
 }
 
 static const struct cpumask *x2apic_cluster_target_cpus(void)
Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -16,6 +16,7 @@ enum cpuhp_states {
 	CPUHP_HRTIMERS_PREPARE,
 	CPUHP_TIMERS_PREPARE,
 	CPUHP_PROFILE_PREPARE,
+	CPUHP_X2APIC_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 39/40] relayfs: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (37 preceding siblings ...)
  2013-01-31 12:11 ` [patch 38/40] smp: Convert core to " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 40/40] slab: " Thomas Gleixner
                   ` (2 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0005-cpu-hotplug-convert-relayfs.patch --]
[-- Type: text/plain, Size: 3581 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    7 +++++
 kernel/cpu.c               |    4 +++
 kernel/relay.c             |   59 ++++++++++-----------------------------------
 3 files changed, 25 insertions(+), 45 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -18,6 +18,7 @@ enum cpuhp_states {
 	CPUHP_PROFILE_PREPARE,
 	CPUHP_X2APIC_PREPARE,
 	CPUHP_SMPCFD_PREPARE,
+	CPUHP_RELAY_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,
@@ -204,4 +205,10 @@ int profile_online_cpu(unsigned int cpu)
 int smpcfd_prepare_cpu(unsigned int cpu);
 int smpcfd_dead_cpu(unsigned int cpu);
 
+#ifdef CONFIG_RELAY
+int relay_prepare_cpu(unsigned int cpu);
+#else
+#define relay_prepare_cpu	NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -768,6 +768,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = smpcfd_prepare_cpu,
 		.teardown = smpcfd_dead_cpu,
 	},
+	[CPUHP_RELAY_PREPARE] = {
+		.startup = relay_prepare_cpu,
+		.teardown = NULL,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
Index: linux-2.6/kernel/relay.c
===================================================================
--- linux-2.6.orig/kernel/relay.c
+++ linux-2.6/kernel/relay.c
@@ -508,46 +508,24 @@ static void setup_callbacks(struct rchan
 	chan->cb = cb;
 }
 
-/**
- * 	relay_hotcpu_callback - CPU hotplug callback
- * 	@nb: notifier block
- * 	@action: hotplug action to take
- * 	@hcpu: CPU number
- *
- * 	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
-				unsigned long action,
-				void *hcpu)
+int __cpuinit relay_prepare_cpu(unsigned int cpu)
 {
-	unsigned int hotcpu = (unsigned long)hcpu;
 	struct rchan *chan;
 
-	switch(action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&relay_channels_mutex);
-		list_for_each_entry(chan, &relay_channels, list) {
-			if (chan->buf[hotcpu])
-				continue;
-			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-			if(!chan->buf[hotcpu]) {
-				printk(KERN_ERR
-					"relay_hotcpu_callback: cpu %d buffer "
-					"creation failed\n", hotcpu);
-				mutex_unlock(&relay_channels_mutex);
-				return notifier_from_errno(-ENOMEM);
-			}
-		}
-		mutex_unlock(&relay_channels_mutex);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* No need to flush the cpu : will be flushed upon
-		 * final relay_flush() call. */
-		break;
+	mutex_lock(&relay_channels_mutex);
+	list_for_each_entry(chan, &relay_channels, list) {
+		if (chan->buf[cpu])
+			continue;
+		chan->buf[cpu] = relay_open_buf(chan, cpu);
+		if (!chan->buf[cpu]) {
+			pr_err("relay: cpu %d buffer creation failed\n", cpu);
+			mutex_unlock(&relay_channels_mutex);
+			return -ENOMEM;
+		}
 	}
-	return NOTIFY_OK;
+
+	mutex_unlock(&relay_channels_mutex);
+	return 0;
 }
 
 /**
@@ -1355,12 +1333,3 @@ const struct file_operations relay_file_
 	.splice_read	= relay_file_splice_read,
 };
 EXPORT_SYMBOL_GPL(relay_file_operations);
-
-static __init int relay_init(void)
-{
-
-	hotcpu_notifier(relay_hotcpu_callback, 0);
-	return 0;
-}
-
-early_initcall(relay_init);



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 38/40] smp: Convert core to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (36 preceding siblings ...)
  2013-01-31 12:11 ` [patch 37/40] x86: x2apic: Convert to cpu " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 12:11 ` [patch 39/40] relayfs: Convert " Thomas Gleixner
                   ` (3 subsequent siblings)
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0004-cpu-hotplug-convert-smp-core.patch --]
[-- Type: text/plain, Size: 3490 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |    5 ++++
 kernel/cpu.c               |    4 +++
 kernel/smp.c               |   50 ++++++++++++++++-----------------------------
 3 files changed, 27 insertions(+), 32 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -17,6 +17,7 @@ enum cpuhp_states {
 	CPUHP_TIMERS_PREPARE,
 	CPUHP_PROFILE_PREPARE,
 	CPUHP_X2APIC_PREPARE,
+	CPUHP_SMPCFD_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,
@@ -199,4 +200,8 @@ int profile_online_cpu(unsigned int cpu)
 #define profile_online_cpu	NULL
 #endif
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -764,6 +764,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = profile_prepare_cpu,
 		.teardown = profile_dead_cpu,
 	},
+	[CPUHP_SMPCFD_PREPARE] = {
+		.startup = smpcfd_prepare_cpu,
+		.teardown = smpcfd_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
Index: linux-2.6/kernel/smp.c
===================================================================
--- linux-2.6.orig/kernel/smp.c
+++ linux-2.6/kernel/smp.c
@@ -45,45 +45,32 @@ struct call_single_queue {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int __cpuinit smpcfd_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
-				cpu_to_node(cpu)))
-			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu)))
-			return notifier_from_errno(-ENOMEM);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+				     cpu_to_node(cpu)))
+		return -ENOMEM;
+	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				     cpu_to_node(cpu))) {
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
-		break;
-#endif
-	};
-
-	return NOTIFY_OK;
+		return -ENOMEM;
+	}
+	return 0;
 }
 
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
-	.notifier_call		= hotplug_cfd,
-};
+int __cpuinit smpcfd_dead_cpu(unsigned int cpu)
+{
+	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+	free_cpumask_var(cfd->cpumask);
+	free_cpumask_var(cfd->cpumask_ipi);
+	return 0;
+}
 
 void __init call_function_init(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -93,8 +80,7 @@ void __init call_function_init(void)
 		INIT_LIST_HEAD(&q->list);
 	}
 
-	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-	register_cpu_notifier(&hotplug_cfd_notifier);
+	smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 40/40] slab: Convert to hotplug state machine
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (38 preceding siblings ...)
  2013-01-31 12:11 ` [patch 39/40] relayfs: Convert " Thomas Gleixner
@ 2013-01-31 12:11 ` Thomas Gleixner
  2013-01-31 20:23 ` [patch 00/40] CPU hotplug rework - episode I Andrew Morton
  2013-02-09  0:28 ` Paul E. McKenney
  41 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 12:11 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Richard Weinberger

[-- Attachment #1: 0006-cpu-hotplug-convert-slab.patch --]
[-- Type: text/plain, Size: 5818 bytes --]

From: Richard Weinberger <richard@nod.at>

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/cpuhotplug.h |   15 ++++++
 kernel/cpu.c               |    8 +++
 mm/slab.c                  |  102 ++++++++++++++++++---------------------------
 3 files changed, 64 insertions(+), 61 deletions(-)

Index: linux-2.6/include/linux/cpuhotplug.h
===================================================================
--- linux-2.6.orig/include/linux/cpuhotplug.h
+++ linux-2.6/include/linux/cpuhotplug.h
@@ -19,6 +19,7 @@ enum cpuhp_states {
 	CPUHP_X2APIC_PREPARE,
 	CPUHP_SMPCFD_PREPARE,
 	CPUHP_RELAY_PREPARE,
+	CPUHP_SLAB_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_NOTIFY_DEAD,
 	CPUHP_CLOCKEVENTS_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_states {
 	CPUHP_WORKQUEUE_ONLINE,
 	CPUHP_CPUFREQ_ONLINE,
 	CPUHP_RCUTREE_ONLINE,
+	CPUHP_SLAB_ONLINE,
 	CPUHP_NOTIFY_ONLINE,
 	CPUHP_PROFILE_ONLINE,
 	CPUHP_NOTIFY_DOWN_PREPARE,
@@ -211,4 +213,17 @@ int relay_prepare_cpu(unsigned int cpu);
 #define relay_prepare_cpu	NULL
 #endif
 
+/* slab hotplug events */
+#if defined(CONFIG_SLAB) && defined(CONFIG_SMP)
+int slab_prepare_cpu(unsigned int cpu);
+int slab_online_cpu(unsigned int cpu);
+int slab_offline_cpu(unsigned int cpu);
+int slab_dead_cpu(unsigned int cpu);
+#else
+#define slab_prepare_cpu	NULL
+#define slab_online_cpu		NULL
+#define slab_offline_cpu	NULL
+#define slab_dead_cpu		NULL
+#endif
+
 #endif
Index: linux-2.6/kernel/cpu.c
===================================================================
--- linux-2.6.orig/kernel/cpu.c
+++ linux-2.6/kernel/cpu.c
@@ -772,6 +772,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = relay_prepare_cpu,
 		.teardown = NULL,
 	},
+	[CPUHP_SLAB_PREPARE] = {
+		.startup = slab_prepare_cpu,
+		.teardown = slab_dead_cpu,
+	},
 	[CPUHP_NOTIFY_PREPARE] = {
 		.startup = notify_prepare,
 		.teardown = NULL,
@@ -820,6 +824,10 @@ static struct cpuhp_step cpuhp_bp_states
 		.startup = profile_online_cpu,
 		.teardown = NULL,
 	},
+	[CPUHP_SLAB_ONLINE] = {
+		.startup = slab_online_cpu,
+		.teardown = slab_offline_cpu,
+	},
 	[CPUHP_NOTIFY_DOWN_PREPARE] = {
 		.startup = NULL,
 		.teardown = notify_down_prepare,
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c
+++ linux-2.6/mm/slab.c
@@ -1426,65 +1426,51 @@ bad:
 	return -ENOMEM;
 }
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
+int __cpuinit slab_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
-	int err = 0;
+	int err;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&slab_mutex);
-		err = cpuup_prepare(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-  	case CPU_DOWN_PREPARE:
-  	case CPU_DOWN_PREPARE_FROZEN:
-		/*
-		 * Shutdown cache reaper. Note that the slab_mutex is
-		 * held so that if cache_reap() is invoked it cannot do
-		 * anything expensive but will only modify reap_work
-		 * and reschedule the timer.
-		*/
-		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
-		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(slab_reap_work, cpu).work.func = NULL;
-  		break;
-  	case CPU_DOWN_FAILED:
-  	case CPU_DOWN_FAILED_FROZEN:
-		start_cpu_timer(cpu);
-  		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/*
-		 * Even if all the cpus of a node are down, we don't free the
-		 * kmem_list3 of any cache. This to avoid a race between
-		 * cpu_down, and a kmalloc allocation from another cpu for
-		 * memory from the node of the cpu going down.  The list3
-		 * structure is usually allocated from kmem_cache_create() and
-		 * gets destroyed at kmem_cache_destroy().
-		 */
-		/* fall through */
-#endif
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&slab_mutex);
-		cpuup_canceled(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	}
-	return notifier_from_errno(err);
+	mutex_lock(&slab_mutex);
+	err = cpuup_prepare(cpu);
+	mutex_unlock(&slab_mutex);
+	return err;
 }
 
-static struct notifier_block __cpuinitdata cpucache_notifier = {
-	&cpuup_callback, NULL, 0
-};
+/*
+ * This is called for a failed online attempt and for a successful
+ * offline.
+ *
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between cpu_down, and
+ * a kmalloc allocation from another cpu for memory from the node of
+ * the cpu going down.  The list3 structure is usually allocated from
+ * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
+ */
+int __cpuinit slab_dead_cpu(unsigned int cpu)
+{
+	mutex_lock(&slab_mutex);
+	cpuup_canceled(cpu);
+	mutex_unlock(&slab_mutex);
+	return 0;
+}
+
+int __cpuinit slab_online_cpu(unsigned int cpu)
+{
+	start_cpu_timer(cpu);
+	return 0;
+}
+
+int __cpuinit slab_offline_cpu(unsigned int cpu)
+{
+	/*
+	 * Shutdown cache reaper. Note that the slab_mutex is held so
+	 * that if cache_reap() is invoked it cannot do anything
+	 * expensive but will only modify reap_work and reschedule the
+	 * timer.
+	 */
+	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
+	/* Now the cache_reaper is guaranteed to be not running. */
+	per_cpu(slab_reap_work, cpu).work.func = NULL;
+}
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 /*
@@ -1764,12 +1750,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	slab_state = FULL;
 
-	/*
-	 * Register a cpu startup notifier callback that initializes
-	 * cpu_cache_get for all new cpus
-	 */
-	register_cpu_notifier(&cpucache_notifier);
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees



^ permalink raw reply	[flat|nested] 67+ messages in thread

* [patch 00/40] CPU hotplug rework - episode I
@ 2013-01-31 15:44 Thomas Gleixner
  2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
                   ` (41 more replies)
  0 siblings, 42 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 15:44 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Linus Torvalds, Andrew Morton

The current CPU hotplug implementation has become an increasing
nightmare full of races and undocumented behaviour. The main issue of
the current hotplug scheme is the completely asymmetric
startup/teardown process. The hotplug notifiers are mostly
undocumented and the CPU_* actions in lots of implementations seem to
be randomly chosen.

We had a long discussion in San Diego last year about reworking the
hotplug core into a fully symmetric state machine. After a few doomed
attempts to convert the existing code into a state machine, I finally
found a workable solution.

The following patch series implements a trivial array based state
machine, which replaces the existing steps in cpu_up/down; the
notifiers which must run on the hotplugged cpu are converted to a
callback array as well. This clearly documents the ordering of the
callbacks and also makes the asymmetric behaviour very obvious.

This series converts the stop_machine thread to the smpboot
infrastructure, implements the core state machine and converts all
notifiers which have ordering constraints plus a randomly chosen bunch
of other notifiers to the state machine.

The runtime installed callbacks are immediately executed by the core
code on, or on behalf of, all cpus which have already reached the
corresponding state. A non-executing installer function is provided as
well to allow simple migration of the existing notifier maze.
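
For illustration, a converted subsystem then boils down to something
like the sketch below. This is not taken from the series: the state
CPUHP_FOO_PREPARE and the foo_* helpers are made-up names, only the
cpuhp_setup_state() interface itself is part of the series (patch 09):

	static int foo_prepare_cpu(unsigned int cpu)
	{
		/* Set up per cpu data; the only stage allowed to fail */
		return foo_alloc_percpu_data(cpu);	/* 0 or -ENOMEM */
	}

	static int foo_dead_cpu(unsigned int cpu)
	{
		/* Counterpart, also invoked to roll back a failed setup */
		foo_free_percpu_data(cpu);
		return 0;
	}

	/*
	 * Installs the callbacks and immediately invokes the startup
	 * callback on behalf of all cpus which have already reached
	 * CPUHP_FOO_PREPARE.
	 */
	cpuhp_setup_state(CPUHP_FOO_PREPARE, foo_prepare_cpu,
			  foo_dead_cpu);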

The diffstat of the complete series is appended below.

 36 files changed, 1300 insertions(+), 1179 deletions(-)

We add slightly more code at this stage (225 lines alone in a header
file), but most of the conversions are removing code and we have only
tackled about 30 of 130+ instances. Even with the current conversion
state, the resulting text size shrinks already.

Known issues:
The current series has a not yet solved section mismatch issue versus
the array callbacks which are already installed at compile time.

There is more work in the pipeline:

 - Convert all notifiers to the state machine callbacks

 - Analyze the asymmetric callbacks and fix them if possible, or at
   least document why they need to be asymmetric.

 - Unify the low level bringup across the architectures
   (e.g. synchronization between boot and hotplugged cpus, common
   setups, scheduler exposure, etc.)

At the end hotplug should run through an array of callbacks on both
sides with explicit core synchronization points. The ordering should
look like this:

CPUHP_OFFLINE                   // Start state.
CPUHP_PREP_<hardware>           // Kick CPU into life / let it die
CPUHP_PREP_<datastructures>     // Get datastructures set up / freed.
CPUHP_PREP_<threads>            // Create threads for cpu
CPUHP_SYNC			// Synchronization point
CPUHP_INIT_<hardware>		// Startup/teardown on the CPU (interrupts, timers ...)
CPUHP_SCHED_<stuff on CPU>      // Unpark/park per cpu local threads on the CPU.
CPUHP_ENABLE_<stuff_on_CPU>	// Enable/disable facilities 
CPUHP_SYNC			// Synchronization point
CPUHP_SCHED                     // Expose/remove CPU from general scheduler.
CPUHP_ONLINE                    // Final state

All PREP states can fail and the corresponding teardown callbacks are
invoked in the same way as they are invoked on offlining.

The existing DOWN_PREPARE notifier has only two instances which
actually might prevent the CPU from going down: rcu_tree and
padata. We might need to keep them, but these can be explicitly
documented as asymmetric states.

Quite a few of the ONLINE/DOWN_PREPARE notifiers are racy and need
proper inspection. All other valid users of ONLINE/DOWN_PREPARE
notifiers should be put into the CPUHP_ENABLE state block and be
executed on the hotplugged CPU. I have not seen a single instance
(except scheduler) which needs to be executed before we remove the CPU
from the general scheduler itself.

This final design needs quite some massaging of the current scheduler
code, but last time I discussed this with scheduler folks it seemed to
be doable with a reasonable effort. Other than that I don't see any
(un)real showstoppers on the horizon.

Thanks,

	tglx
---
 arch/arm/kernel/perf_event_cpu.c              |   28 -
 arch/arm/vfp/vfpmodule.c                      |   29 -
 arch/blackfin/kernel/perf_event.c             |   25 -
 arch/powerpc/perf/core-book3s.c               |   29 -
 arch/s390/kernel/perf_cpum_cf.c               |   37 -
 arch/s390/kernel/vtime.c                      |   18 
 arch/sh/kernel/perf_event.c                   |   22 
 arch/x86/kernel/apic/x2apic_cluster.c         |   80 +--
 arch/x86/kernel/cpu/perf_event.c              |   78 +--
 arch/x86/kernel/cpu/perf_event_amd.c          |    6 
 arch/x86/kernel/cpu/perf_event_amd_ibs.c      |   54 --
 arch/x86/kernel/cpu/perf_event_intel.c        |    6 
 arch/x86/kernel/cpu/perf_event_intel_uncore.c |  109 +---
 arch/x86/kernel/tboot.c                       |   23 
 drivers/clocksource/arm_generic.c             |   40 -
 drivers/cpufreq/cpufreq_stats.c               |   55 --
 include/linux/cpu.h                           |   45 -
 include/linux/cpuhotplug.h                    |  207 ++++++++
 include/linux/perf_event.h                    |   21 
 include/linux/smpboot.h                       |    5 
 init/main.c                                   |   15 
 kernel/cpu.c                                  |  613 ++++++++++++++++++++++----
 kernel/events/core.c                          |   36 -
 kernel/hrtimer.c                              |   47 -
 kernel/profile.c                              |   92 +--
 kernel/rcutree.c                              |   95 +---
 kernel/sched/core.c                           |  251 ++++------
 kernel/sched/fair.c                           |   16 
 kernel/smp.c                                  |   50 --
 kernel/smpboot.c                              |   11 
 kernel/smpboot.h                              |    4 
 kernel/stop_machine.c                         |  154 ++----
 kernel/time/clockevents.c                     |   13 
 kernel/timer.c                                |   43 -
 kernel/workqueue.c                            |   80 +--
 virt/kvm/kvm_main.c                           |   42 -
 36 files changed, 1300 insertions(+), 1179 deletions(-)


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (39 preceding siblings ...)
  2013-01-31 12:11 ` [patch 40/40] slab: " Thomas Gleixner
@ 2013-01-31 20:23 ` Andrew Morton
  2013-01-31 21:48   ` Thomas Gleixner
  2013-02-09  0:28 ` Paul E. McKenney
  41 siblings, 1 reply; 67+ messages in thread
From: Andrew Morton @ 2013-01-31 20:23 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Linus Torvalds

On Thu, 31 Jan 2013 15:44:10 -0000
Thomas Gleixner <tglx@linutronix.de> wrote:

> At the end hotplug should run through an array of callbacks on both
> sides with explicit core synchronization points. The ordering should
> look like this:
> 
> CPUHP_OFFLINE                   // Start state.
> CPUHP_PREP_<hardware>           // Kick CPU into life / let it die
> CPUHP_PREP_<datastructures>     // Get datastructures set up / freed.
> CPUHP_PREP_<threads>            // Create threads for cpu
> CPUHP_SYNC			// Synchronization point
> CPUHP_INIT_<hardware>		// Startup/teardown on the CPU (interrupts, timers ...)
> CPUHP_SCHED_<stuff on CPU>      // Unpark/park per cpu local threads on the CPU.
> CPUHP_ENABLE_<stuff_on_CPU>	// Enable/disable facilities 
> CPUHP_SYNC			// Synchronization point
> CPUHP_SCHED                     // Expose/remove CPU from general scheduler.
> CPUHP_ONLINE                    // Final state

What does CPUHP_SYNC do?

Methinks Tejun needed a cc on this lot ;)

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 20:23 ` [patch 00/40] CPU hotplug rework - episode I Andrew Morton
@ 2013-01-31 21:48   ` Thomas Gleixner
  2013-01-31 21:59     ` Linus Torvalds
  0 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 21:48 UTC (permalink / raw)
  To: Andrew Morton
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm, Linus Torvalds

On Thu, 31 Jan 2013, Andrew Morton wrote:

> On Thu, 31 Jan 2013 15:44:10 -0000
> Thomas Gleixner <tglx@linutronix.de> wrote:
> 
> > At the end hotplug should run through an array of callbacks on both
> > sides with explicit core synchronization points. The ordering should
> > look like this:
> > 
> > CPUHP_OFFLINE                   // Start state.
> > CPUHP_PREP_<hardware>           // Kick CPU into life / let it die
> > CPUHP_PREP_<datastructures>     // Get datastructures set up / freed.
> > CPUHP_PREP_<threads>            // Create threads for cpu
> > CPUHP_SYNC			// Synchronization point
> > CPUHP_INIT_<hardware>		// Startup/teardown on the CPU (interrupts, timers ...)
> > CPUHP_SCHED_<stuff on CPU>      // Unpark/park per cpu local threads on the CPU.
> > CPUHP_ENABLE_<stuff_on_CPU>	// Enable/disable facilities 
> > CPUHP_SYNC			// Synchronization point
> > CPUHP_SCHED                     // Expose/remove CPU from general scheduler.
> > CPUHP_ONLINE                    // Final state
> 
> What does CPUHP_SYNC do?

This is a future step which makes sure that the cpu which controls the
bringup or teardown and the hotplugged cpu itself synchronize at
defined points. Right now, this synchronization is buried somewhere in
the architecture code and of course every arch does it differently
versus the generic bringup/teardown mechanisms.
 
> Methinks Tejun needed a cc on this lot ;)

Not really. The workqueue hotplug scheme is today one of the sanest in
that area. Earlier versions were a prime example of hotplug hell!

TJ has converted it via the notifier priorities to a symmetric
startup/teardown scheme already. So the conversion is a no-brainer and
no real change.

Sure, I should have spent the cycles to add every file owner to the cc
list, but due to brain damage caused by decoding the current hotplug
maze, I skipped that painful exercise, relying on you to fix it up for
me :)

Thanks,

	tglx




^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 21:48   ` Thomas Gleixner
@ 2013-01-31 21:59     ` Linus Torvalds
  2013-01-31 22:44       ` Thomas Gleixner
  0 siblings, 1 reply; 67+ messages in thread
From: Linus Torvalds @ 2013-01-31 21:59 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Andrew Morton, LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Paul McKenney, Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Fri, Feb 1, 2013 at 8:48 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
>> Methinks Tejun needed a cc on this lot ;)
>
> Not really.

I think we want as many people as possible cc'd on this. You may think
it's an obvious improvement, but maybe it's just because you now
understand the code because you wrote it yourself, not because it's
*actually* better.

Having some explicitly documented states may be nice, but do we need
eleven of them? And do we want to expose them? At least not for the
f*cking notifiers, I hope. Notifiers are a disgrace, and almost all of
them are a major design mistake. They all have locking problems, they
introduce arbitrary internal APIs that are hard to fix later (because
you have random people who decided to hook into them, which is the
whole *point* of those notifier chains).

Since the patches themselves weren't cc'd, I don't know if you
actually made each state transition do those insane notifiers or not,
but I seriously hope you didn't. With that many states, hopefully the
idea is that you don't have any notifiers at all, and you just then
call the people associated with a particular state directly. Yes? No?

Because if this adds tons of new notifiers, I'm going to say that we
need about a hundred people signing off on the patches.  Part of your
explanation made me think you got rid of the notifiers, but then it
became clear that you just renamed them as "state callbacks". If
that's some generic exposed interface, I'll NAK it. No way in hell do
we want to expose eleven states with some random generic "SMP state
callback interface". F*ck no.

                Linus

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 21:59     ` Linus Torvalds
@ 2013-01-31 22:44       ` Thomas Gleixner
  2013-01-31 22:55         ` Linus Torvalds
  0 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-01-31 22:44 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Andrew Morton, LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Paul McKenney, Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Fri, 1 Feb 2013, Linus Torvalds wrote:
> On Fri, Feb 1, 2013 at 8:48 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
> I think we want as many people as possible cc'd on this. You may think
> it's an obvious improvement, but maybe it's just because you now
> understand the code because you wrote it yourself, not because it's
> *actually* better.

Fair enough.
 
> Having some explicitly documented states may be nice, but do we need
> eleven of them? And do we want to expose them? At least not for the
> f*cking notifiers, I hope. Notifiers are a disgrace, and almost all of
> them are a major design mistake. They all have locking problems, the
> introduce internal arbitrary API's that are hard to fix later (because
> you have random people who decided to hook into them, which is the
> whole *point* of those notifier chains).

That's the whole point of this. The notifier chains are random places
which allow people to add crap at will. And they did. Just grep for
any of the gazillion hotplug notifier implementations. Though some of
them you might not be able to find because they are buried under
subsystem macro magic.
 
> Since the patches themselves weren't cc'd, I don't know if you

I screwed up the 0/40 cover header and therefore quilt decided to
exclude you from that 40-patch mail bomb :(

Poke me if you want a private copy

> actually made each state transition do those insane notifiers or not,
> but I seriously hope you didn't. With that many states, hopefully the
> idea is that you don't have any notifiers at all, and you just then
> call the people associated with a particular state directly. Yes? No?

The current lot of patches are converting parts of the documented and
undocumented notifier callbacks into an array of callbacks which
documents the ordering. Some of the patches aggregate multiple
notifiers into a single one which ensures the ordering of the subsystem
specific ones, but at the moment I concentrated on providing a path
from a wild notifier scheme to a documented list of callbacks.
 
> Because if this adds tons of new notifiers, I'm going to say that we

No. It does not add any new notifiers. It simply converts notifier
randomness, i.e. your personal choice of CPU_* action flavor, into an
ordered list.

> need about a hundred people signing off on the patches.  Part of your
> explanation made me think you got rid of the notifiers, but then it

I wish I could have got rid of the notifiers. But do you have a way to
do that w/o breaking the world and some more? The only way to do that
would be reimplementing it from scratch. Sure we can do that, and hell
I know how, but we cannot do that in the current workflow. That would
require a switch back to the 2.odd/even scheme and we all know how
well that worked out.

> became clear that you just renamed them as "state callbacks". If

It's not about renaming. It's about making the ordering constraints
clear. It's about documenting the existing horror in a way that one
can understand the hotplug process w/o hallucinogenic drugs.

> that's some generic exposed interface, I'll NAK it. No way in hell do
> we want to expose eleven states with some random generic "SMP state
> callback interface". F*ck no.

Just face it. The current hotplug maze has 100+ states which are
completely undocumented. They are asymmetric vs. startup and
teardown. They just exist and somehow work, aside from the occasional
hard-to-decode hiccup.

Do you really want to preserve that state by all means [F*ck no]?

Thanks,

	tglx


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 22:44       ` Thomas Gleixner
@ 2013-01-31 22:55         ` Linus Torvalds
  2013-02-01 10:51           ` Thomas Gleixner
  0 siblings, 1 reply; 67+ messages in thread
From: Linus Torvalds @ 2013-01-31 22:55 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Andrew Morton, LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Paul McKenney, Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Fri, Feb 1, 2013 at 9:44 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
>
> Just face it. The current hotplug maze has 100+ states which are
> completely undocumented. They are asymmetric vs. startup and
> teardown. They just exist and somehow work, aside from the occasional
> hard-to-decode hiccup.
>
> Do you really want to preserve that state by all means [F*ck no]?

No. But I also don't want to replace it with "there's now eleven
documented states, and random people hook into random documented
states".

So for me it's the "expose these states" that I get worried about. A
random driver should not necessarily even be able to *see* this, and
decide to be clever and take advantage of the ordering.

So I'd hope there would be some visibility restrictions. We currently
have drivers already being confused by DOWN_PREPARE vs DOWN_FAILED and
other random state transitions, and giving them even more flexibility
to pick random states sounds like a really bad idea. I'd like to make
sure that drivers and filesystems etc do not even *see* the states
that are meant for the scheduler or workqueues, for example.

So 11 states (although some of those seem to have lots of substates,
so there may be many more) is too many to *expose*. It's not
necessarily too many to "have and document", if you see the
difference.

                   Linus

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 22:55         ` Linus Torvalds
@ 2013-02-01 10:51           ` Thomas Gleixner
  2013-02-07  4:01             ` Rusty Russell
  0 siblings, 1 reply; 67+ messages in thread
From: Thomas Gleixner @ 2013-02-01 10:51 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Andrew Morton, LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Paul McKenney, Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Fri, 1 Feb 2013, Linus Torvalds wrote:

> On Fri, Feb 1, 2013 at 9:44 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
> >
> > Just face it. The current hotplug maze has 100+ states which are
> > completely undocumented. They are asymmetric vs. startup and
> > teardown. They just exist and somehow work, aside from the occasional
> > hard-to-decode hiccup.
> >
> > Do you really want to preserve that state by all means [F*ck no]?
> 
> No. But I also don't want to replace it with "there's now eleven
> documented states, and random people hook into random documented
> states".

That's not the plan.
 
> So for me it's the "expose these states" that I get worried about. A
> random driver should not necessarily even be able to *see* this, and
> decide to be clever and take advantage of the ordering.
> 
> So I'd hope there would be some visibility restrictions. We currently
> have drivers already being confused by DOWN_PREPARE vs DOWN_FAILED and
> other random state transitions, and giving them even more flexibility
> to pick random states sounds like a really bad idea. I'd like to make
> sure that drivers and filesystems etc do not even *see* the states
> that are meant for the scheduler or workqueues, for example.

The only states which drivers, filesystems etc are going to see in the
end are:

    CPUHP_PREP_<datastructures>     // Get datastructures set up / freed.

This is _before_ a cpu comes to life and _after_ it is gone. And that
does not require ordering.

    CPUHP_ENABLE_<stuff_on_CPU>     // Enable/disable facilities 

This is _before_ a cpu becomes visible to the general scheduler and
_after_ it has been removed from it.

Those states do not require ordering, at least not at the driver level.

And they are not going to be exposed with a dozen substates. The
only information at both stages is going to be: setup or teardown.

The enable/disable stuff is not allowed to fail. There is no reason
why a driver should be able to veto a cpu offline operation.

The only thing which can fail is the setup stage in preparation, where
you could fail to allocate memory etc.
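
Purely as illustration, the driver visible part boils down to the
following sketch (all foo_* names and struct foo are invented, only
the two hook points themselves are real):

	static DEFINE_PER_CPU(struct foo *, foo_data);

	/*
	 * CPUHP_PREP_<datastructures>: runs before the cpu comes to
	 * life / after it is gone. May fail, e.g. with -ENOMEM.
	 */
	static int foo_prepare_cpu(unsigned int cpu)
	{
		struct foo *p = kzalloc_node(sizeof(*p), GFP_KERNEL,
					     cpu_to_node(cpu));
		if (!p)
			return -ENOMEM;
		per_cpu(foo_data, cpu) = p;
		return 0;
	}

	/*
	 * CPUHP_ENABLE_<stuff_on_CPU>: runs on the hotplugged cpu.
	 * Must not fail, so it cannot veto an offline operation.
	 */
	static int foo_enable_cpu(unsigned int cpu)
	{
		foo_start(per_cpu(foo_data, cpu));	/* invented helper */
		return 0;
	}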

> So 11 states (although some of those seem to have lots of substates,
> so there may be many more) is too many to *expose*. It's not
> necessarily too many to "have and document", if you see the
> difference.

I don't want to expose them to the general public. I just want the
(arch) core states documented properly with an explicit ordering
scheme. Drivers and stuff should not even know about ordering
requirements.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 09/40] cpu: hotplug: Implement setup/removal interface
  2013-01-31 12:11 ` [patch 09/40] cpu: hotplug: Implement setup/removal interface Thomas Gleixner
@ 2013-02-01 13:44   ` Hillf Danton
  2013-02-01 13:52     ` Thomas Gleixner
  0 siblings, 1 reply; 67+ messages in thread
From: Hillf Danton @ 2013-02-01 13:44 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul

On Thu, Jan 31, 2013 at 8:11 PM, Thomas Gleixner <tglx@linutronix.de> wrote:
> +/**
> + * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
> + * @state:     The state to setup
> + * @invoke:    If true, the startup function is invoked for cpus where
> + *             cpu state >= @state
> + * @startup:   startup callback function
> + * @teardown:  teardown callback function
> + *
> + * Returns 0 if successful, otherwise a proper error code
> + */
> +int __cpuhp_setup_state(enum cpuhp_states state, bool invoke,
> +                       int (*startup)(unsigned int cpu),
> +                       int (*teardown)(unsigned int cpu))
> +{
> +       int cpu, ret = 0;
> +
> +       if (cpuhp_cb_check(state))
> +               return -EINVAL;
> +
> +       get_online_cpus();
> +
> +       if (!invoke || !startup)
> +               goto install;
> +
> +       /*
> +        * Try to call the startup callback for each present cpu
> +        * depending on the hotplug state of the cpu.
> +        */
> +       for_each_present_cpu(cpu) {
> +               int ret, cpustate = per_cpu(cpuhp_state, cpu);

s/ret,//

> +
> +               if (cpustate < state)
> +                       continue;
> +
> +               ret = cpuhp_issue_call(cpu, state, startup, true);
> +               if (ret) {
> +                       cpuhp_rollback_install(cpu, state, teardown);
> +                       goto out;
> +               }
> +       }
> +install:
> +       cpuhp_store_callbacks(state, startup, teardown);
> +out:
> +       put_online_cpus();
> +       return ret;
> +}
> +EXPORT_SYMBOL(__cpuhp_setup_state);

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 09/40] cpu: hotplug: Implement setup/removal interface
  2013-02-01 13:44   ` Hillf Danton
@ 2013-02-01 13:52     ` Thomas Gleixner
  0 siblings, 0 replies; 67+ messages in thread
From: Thomas Gleixner @ 2013-02-01 13:52 UTC (permalink / raw)
  To: Hillf Danton
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul

On Fri, 1 Feb 2013, Hillf Danton wrote:
> On Thu, Jan 31, 2013 at 8:11 PM, Thomas Gleixner <tglx@linutronix.de> wrote:
> > +/**
> > +       for_each_present_cpu(cpu) {
> > +               int ret, cpustate = per_cpu(cpuhp_state, cpu);
> 
> s/ret,//

Duh, yes.

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-02-01 10:51           ` Thomas Gleixner
@ 2013-02-07  4:01             ` Rusty Russell
  0 siblings, 0 replies; 67+ messages in thread
From: Rusty Russell @ 2013-02-07  4:01 UTC (permalink / raw)
  To: Thomas Gleixner, Linus Torvalds
  Cc: Andrew Morton, LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

Thomas Gleixner <tglx@linutronix.de> writes:
> On Fri, 1 Feb 2013, Linus Torvalds wrote:
>> So for me it's the "expose these states" that I get worried about.. A
>> random driver should not necessarily even be able to *see* this, and
>> decide to be clever and take advantage of the ordering.
>> 
>> So I'd hope there would be some visibility restrictions. We currently
>> have drivers already being confused by DOWN_PREPARE vs DOWN_FAILED etc
>> etc random state transitions, and giving them even more flexibility to
>> pick random states sounds like a really bad idea. I'd like to make
>> sure that drivers and filesystems etc do not even *see* the states
>> that are meant for the scheduler or workqueues, for example.

Yeah, I assume Episode II is where we collapse each into sane states
as Thomas clarified.  That can be reviewed: I'd hate to try to do it
in one go.

Cheers,
Rusty.

^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 00/40] CPU hotplug rework - episode I
  2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
                   ` (40 preceding siblings ...)
  2013-01-31 20:23 ` [patch 00/40] CPU hotplug rework - episode I Andrew Morton
@ 2013-02-09  0:28 ` Paul E. McKenney
  41 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:28 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger,
	Magnus Damm <magnus.damm@gmail.com>, Linus Torvalds,
	Andrew Morton

On Thu, Jan 31, 2013 at 12:11:11PM -0000, Thomas Gleixner wrote:
> The current CPU hotplug implementation has become an increasing
> nightmare full of races and undocumented behaviour. The main issue of
> the current hotplug scheme is the completely asymmetric
> startup/teardown process. The hotplug notifiers are mostly
> undocumented and the CPU_* actions in lots of implementations seem to
> be randomly chosen.
> 
> We had a long discussion in San Diego last year about reworking the
> hotplug core into a fully symmetric state machine. After a few doomed
> attempts to convert the existing code into a state machine, I finally
> found a workable solution.
> 
> The following patch series implements a trivial array based state
> machine, which replaces the existing steps in cpu_up/down and also the
> notifiers which must run on the hotplugged cpu are converted to a
> callback array. This documents clearly the ordering of the callbacks
> and also makes the asymmetric behaviour very obvious.
> 
> This series converts the stop_machine thread to the smpboot
> infrastructure, implements the core state machine and converts all
> notifiers which have ordering constraints plus a randomly chosen bunch
> of other notifiers to the state machine.
> 
> The runtime-installed callbacks are immediately executed by the core
> code on, or on behalf of, all cpus which have already reached the
> corresponding state. A non-executing installer function is provided as
> well to allow simple migration of the existing notifier maze.
> 
> The diffstat of the complete series is appended below.
> 
>  36 files changed, 1300 insertions(+), 1179 deletions(-)
> 
> We add slightly more code at this stage (225 lines alone in a header
> file), but most of the conversions are removing code and we have only
> tackled about 30 of 130+ instances. Even with the current conversion
> state, the resulting text size shrinks already.
> 
> Known issues:
> The current series has a not-yet-solved section mismatch issue with
> the array callbacks which are already installed at compile time.
> 
> There is more work in the pipeline:
> 
>  - Convert all notifiers to the state machine callbacks
> 
>  - Analyze the asymmetric callbacks and fix them if possible or at
>    least document why they need to be asymmetric.
> 
>  - Unify the low level bringup across the architectures
>    (e.g. synchronization between boot and hotplugged cpus, common
>    setups, scheduler exposure, etc.)
> 
> At the end hotplug should run through an array of callbacks on both
> sides with explicit core synchronization points. The ordering should
> look like this:
> 
> CPUHP_OFFLINE                   // Start state.
> CPUHP_PREP_<hardware>           // Kick CPU into life / let it die
> CPUHP_PREP_<datastructures>     // Get datastructures set up / freed.
> CPUHP_PREP_<threads>            // Create threads for cpu
> CPUHP_SYNC			// Synchronization point
> CPUHP_INIT_<hardware>		// Startup/teardown on the CPU (interrupts, timers ...)
> CPUHP_SCHED_<stuff on CPU>      // Unpark/park per cpu local threads on the CPU.
> CPUHP_ENABLE_<stuff_on_CPU>	// Enable/disable facilities 
> CPUHP_SYNC			// Synchronization point
> CPUHP_SCHED                     // Expose/remove CPU from general scheduler.
> CPUHP_ONLINE                    // Final state
> 
> All PREP states can fail and the corresponding teardown callbacks are
> invoked in the same way as they are invoked on offlining.
> 
> The existing DOWN_PREPARE notifier has only two instances which
> actually might prevent the CPU from going down: rcu_tree and
> padata. We might need to keep them, but these can be explicitly
> documented asymmetric states.
> 
> Quite a few of the ONLINE/DOWN_PREPARE notifiers are racy and need
> proper inspection. All other valid users of ONLINE/DOWN_PREPARE
> notifiers should be put into the CPUHP_ENABLE state block and be
> executed on the hotplugged CPU. I have not seen a single instance
> (except scheduler) which needs to be executed before we remove the CPU
> from the general scheduler itself.
> 
> This final design needs quite a bit of massaging of the current scheduler
> code, but last time I discussed this with scheduler folks it seemed to
> be doable with a reasonable effort. Other than that I don't see any
> (un)real showstoppers on the horizon.

Very cool!!!  At first glance, this looks like it dovetails very
nicely with Srivatsa Bhat's work on the hotplug locking.

							Thanx, Paul

> Thanks,
> 
> 	tglx
> ---
>  arch/arm/kernel/perf_event_cpu.c              |   28 -
>  arch/arm/vfp/vfpmodule.c                      |   29 -
>  arch/blackfin/kernel/perf_event.c             |   25 -
>  arch/powerpc/perf/core-book3s.c               |   29 -
>  arch/s390/kernel/perf_cpum_cf.c               |   37 -
>  arch/s390/kernel/vtime.c                      |   18 
>  arch/sh/kernel/perf_event.c                   |   22 
>  arch/x86/kernel/apic/x2apic_cluster.c         |   80 +--
>  arch/x86/kernel/cpu/perf_event.c              |   78 +--
>  arch/x86/kernel/cpu/perf_event_amd.c          |    6 
>  arch/x86/kernel/cpu/perf_event_amd_ibs.c      |   54 --
>  arch/x86/kernel/cpu/perf_event_intel.c        |    6 
>  arch/x86/kernel/cpu/perf_event_intel_uncore.c |  109 +---
>  arch/x86/kernel/tboot.c                       |   23 
>  drivers/clocksource/arm_generic.c             |   40 -
>  drivers/cpufreq/cpufreq_stats.c               |   55 --
>  include/linux/cpu.h                           |   45 -
>  include/linux/cpuhotplug.h                    |  207 ++++++++
>  include/linux/perf_event.h                    |   21 
>  include/linux/smpboot.h                       |    5 
>  init/main.c                                   |   15 
>  kernel/cpu.c                                  |  613 ++++++++++++++++++++++----
>  kernel/events/core.c                          |   36 -
>  kernel/hrtimer.c                              |   47 -
>  kernel/profile.c                              |   92 +--
>  kernel/rcutree.c                              |   95 +---
>  kernel/sched/core.c                           |  251 ++++------
>  kernel/sched/fair.c                           |   16 
>  kernel/smp.c                                  |   50 --
>  kernel/smpboot.c                              |   11 
>  kernel/smpboot.h                              |    4 
>  kernel/stop_machine.c                         |  154 ++----
>  kernel/time/clockevents.c                     |   13 
>  kernel/timer.c                                |   43 -
>  kernel/workqueue.c                            |   80 +--
>  virt/kvm/kvm_main.c                           |   42 -
>  36 files changed, 1300 insertions(+), 1179 deletions(-)
> 
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
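
To make the asymmetry argument above concrete: today a subsystem
multiplexes all hotplug events through a single notifier, while the
state machine expects one startup/teardown pair per documented state.
The sketch below is generic and not taken from any particular driver;
all mydrv_* helpers are hypothetical:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int mydrv_alloc(unsigned int cpu);	/* hypothetical */
	static int mydrv_free(unsigned int cpu);	/* hypothetical */

	/* Old style: one callback demultiplexing loosely ordered actions */
	static int mydrv_cpu_callback(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			return notifier_from_errno(mydrv_alloc(cpu));
		case CPU_UP_CANCELED:
		case CPU_DEAD:
			mydrv_free(cpu);
			break;
		}
		return NOTIFY_OK;
	}

In the new scheme the same driver would simply install mydrv_alloc and
mydrv_free as the startup/teardown pair of one state, and the core
guarantees the ordering against all other states.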

* Re: [patch 01/40] smpboot: Allow selfparking per cpu threads
  2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
@ 2013-02-09  0:29   ` Paul E. McKenney
  2013-02-14 17:46   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:29 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:12PM -0000, Thomas Gleixner wrote:
> The stop machine threads are still killed when a cpu goes offline. The
> reason is that the thread is used to bring the cpu down, so it can't
> be parked along with the other per cpu threads.
> 
> Allow a per cpu thread to be excluded from automatic parking, so it
> can park itself once it's done.
> 
> Add a create callback function as well.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  include/linux/smpboot.h |    5 +++++
>  kernel/smpboot.c        |    5 +++--
>  2 files changed, 8 insertions(+), 2 deletions(-)
> 
> Index: linux-2.6/include/linux/smpboot.h
> ===================================================================
> --- linux-2.6.orig/include/linux/smpboot.h
> +++ linux-2.6/include/linux/smpboot.h
> @@ -14,6 +14,8 @@ struct smpboot_thread_data;
>   * @thread_should_run:	Check whether the thread should run or not. Called with
>   *			preemption disabled.
>   * @thread_fn:		The associated thread function
> + * @create:		Optional setup function, called when the thread gets
> + *			created (Not called from the thread context)
>   * @setup:		Optional setup function, called when the thread gets
>   *			operational the first time
>   * @cleanup:		Optional cleanup function, called when the thread
> @@ -22,6 +24,7 @@ struct smpboot_thread_data;
>   *			parked (cpu offline)
>   * @unpark:		Optional unpark function, called when the thread is
>   *			unparked (cpu online)
> + * @selfparking:	Thread is not parked by the park function.
>   * @thread_comm:	The base name of the thread
>   */
>  struct smp_hotplug_thread {
> @@ -29,10 +32,12 @@ struct smp_hotplug_thread {
>  	struct list_head		list;
>  	int				(*thread_should_run)(unsigned int cpu);
>  	void				(*thread_fn)(unsigned int cpu);
> +	void				(*create)(unsigned int cpu);
>  	void				(*setup)(unsigned int cpu);
>  	void				(*cleanup)(unsigned int cpu, bool online);
>  	void				(*park)(unsigned int cpu);
>  	void				(*unpark)(unsigned int cpu);
> +	bool				selfparking;
>  	const char			*thread_comm;
>  };
> 
> Index: linux-2.6/kernel/smpboot.c
> ===================================================================
> --- linux-2.6.orig/kernel/smpboot.c
> +++ linux-2.6/kernel/smpboot.c
> @@ -183,9 +183,10 @@ __smpboot_create_thread(struct smp_hotpl
>  		kfree(td);
>  		return PTR_ERR(tsk);
>  	}
> -
>  	get_task_struct(tsk);
>  	*per_cpu_ptr(ht->store, cpu) = tsk;
> +	if (ht->create)
> +		ht->create(cpu);
>  	return 0;
>  }
> 
> @@ -225,7 +226,7 @@ static void smpboot_park_thread(struct s
>  {
>  	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
> 
> -	if (tsk)
> +	if (tsk && !ht->selfparking)
>  		kthread_park(tsk);
>  }
> 
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
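
The practical effect of the new flag, as a sketch: a selfparking client
sets .selfparking and calls kthread_park() on itself from its own
thread function once its work is done, instead of being parked by the
smpboot core. All mydrv_* names are hypothetical; the struct layout
follows the patch:

	#include <linux/smpboot.h>
	#include <linux/kthread.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>

	static DEFINE_PER_CPU(struct task_struct *, mydrv_task);

	static int mydrv_should_run(unsigned int cpu);		/* hypothetical */
	static bool mydrv_work_finished(unsigned int cpu);	/* hypothetical */

	static void mydrv_thread_fn(unsigned int cpu)
	{
		if (mydrv_work_finished(cpu))
			kthread_park(current);	/* park ourselves */
	}

	static struct smp_hotplug_thread mydrv_threads = {
		.store			= &mydrv_task,
		.thread_should_run	= mydrv_should_run,
		.thread_fn		= mydrv_thread_fn,
		.thread_comm		= "mydrv/%u",
		.selfparking		= true,	/* core skips kthread_park() */
	};

Registration stays the usual smpboot_register_percpu_thread() call;
patch 03 below turns the stopper thread into exactly such a client.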

* Re: [patch 02/40] stop_machine: Store task reference in a separate per cpu variable
  2013-01-31 12:11 ` [patch 02/40] stop_machine: Store task reference in a separate per cpu variable Thomas Gleixner
@ 2013-02-09  0:33   ` Paul E. McKenney
  2013-02-14 17:47   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:33 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:13PM -0000, Thomas Gleixner wrote:
> To allow the stopper thread to be managed by the smpboot thread
> infrastructure, separate out the task storage from the stopper data
> structure.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  kernel/stop_machine.c |   32 ++++++++++++++++----------------
>  1 file changed, 16 insertions(+), 16 deletions(-)
> 
> Index: linux-2.6/kernel/stop_machine.c
> ===================================================================
> --- linux-2.6.orig/kernel/stop_machine.c
> +++ linux-2.6/kernel/stop_machine.c
> @@ -37,10 +37,10 @@ struct cpu_stopper {
>  	spinlock_t		lock;
>  	bool			enabled;	/* is this stopper enabled? */
>  	struct list_head	works;		/* list of pending works */
> -	struct task_struct	*thread;	/* stopper thread */
>  };
> 
>  static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
> +static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
>  static bool stop_machine_initialized = false;
> 
>  static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
> @@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct 
>  }
> 
>  /* queue @work to @stopper.  if offline, @work is completed immediately */
> -static void cpu_stop_queue_work(struct cpu_stopper *stopper,
> -				struct cpu_stop_work *work)
> +static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
>  {
> +	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> +	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
> +
>  	unsigned long flags;
> 
>  	spin_lock_irqsave(&stopper->lock, flags);
> 
>  	if (stopper->enabled) {
>  		list_add_tail(&work->list, &stopper->works);
> -		wake_up_process(stopper->thread);
> +		wake_up_process(p);
>  	} else
>  		cpu_stop_signal_done(work->done, false);
> 
> @@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
>  	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
> 
>  	cpu_stop_init_done(&done, 1);
> -	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
> +	cpu_stop_queue_work(cpu, &work);
>  	wait_for_completion(&done.completion);
>  	return done.executed ? done.ret : -ENOENT;
>  }
> @@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cp
>  			struct cpu_stop_work *work_buf)
>  {
>  	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
> -	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
> +	cpu_stop_queue_work(cpu, work_buf);
>  }
> 
>  /* static data for stop_cpus */
> @@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const s
>  	 */
>  	preempt_disable();
>  	for_each_cpu(cpu, cpumask)
> -		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
> -				    &per_cpu(stop_cpus_work, cpu));
> +		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
>  	preempt_enable();
>  }
> 
> @@ -304,12 +305,11 @@ static int __cpuinit cpu_stop_cpu_callba
>  {
>  	unsigned int cpu = (unsigned long)hcpu;
>  	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> -	struct task_struct *p;
> +	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
> 
>  	switch (action & ~CPU_TASKS_FROZEN) {
>  	case CPU_UP_PREPARE:
> -		BUG_ON(stopper->thread || stopper->enabled ||
> -		       !list_empty(&stopper->works));
> +		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
>  		p = kthread_create_on_node(cpu_stopper_thread,
>  					   stopper,
>  					   cpu_to_node(cpu),
> @@ -319,12 +319,12 @@ static int __cpuinit cpu_stop_cpu_callba
>  		get_task_struct(p);
>  		kthread_bind(p, cpu);
>  		sched_set_stop_task(cpu, p);
> -		stopper->thread = p;
> +		per_cpu(cpu_stopper_task, cpu) = p;
>  		break;
> 
>  	case CPU_ONLINE:
>  		/* strictly unnecessary, as first user will wake it */
> -		wake_up_process(stopper->thread);
> +		wake_up_process(p);
>  		/* mark enabled */
>  		spin_lock_irq(&stopper->lock);
>  		stopper->enabled = true;
> @@ -339,7 +339,7 @@ static int __cpuinit cpu_stop_cpu_callba
> 
>  		sched_set_stop_task(cpu, NULL);
>  		/* kill the stopper */
> -		kthread_stop(stopper->thread);
> +		kthread_stop(p);
>  		/* drain remaining works */
>  		spin_lock_irq(&stopper->lock);
>  		list_for_each_entry(work, &stopper->works, list)
> @@ -347,8 +347,8 @@ static int __cpuinit cpu_stop_cpu_callba
>  		stopper->enabled = false;
>  		spin_unlock_irq(&stopper->lock);
>  		/* release the stopper */
> -		put_task_struct(stopper->thread);
> -		stopper->thread = NULL;
> +		put_task_struct(p);
> +		per_cpu(cpu_stopper_task, cpu) = NULL;
>  		break;
>  	}
>  #endif
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
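
The reason for the separate per cpu variable becomes visible one patch
later: the smpboot infrastructure takes its thread storage as a per cpu
task_struct pointer via the .store member, so the stopper's task
reference has to live in exactly that shape. Schematically (this is
just the generic per cpu access pattern; the example function is
invented for illustration):

	#include <linux/percpu.h>
	#include <linux/sched.h>

	static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);

	static struct task_struct *example_access(unsigned int cpu,
						  struct task_struct *p)
	{
		per_cpu(cpu_stopper_task, cpu) = p;	/* write slot of @cpu */
		return per_cpu(cpu_stopper_task, cpu);	/* read it back */
	}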

* Re: [patch 03/40] stop_machine: Use smpboot threads
  2013-01-31 12:11 ` [patch 03/40] stop_machine: Use smpboot threads Thomas Gleixner
@ 2013-02-09  0:39   ` Paul E. McKenney
  2013-02-14 17:49   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:39 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:14PM -0000, Thomas Gleixner wrote:
> Use the smpboot thread infrastructure. Mark the stopper thread
> selfparking and park it after it has finished the take_cpu_down()
> work.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

One grammar nit, other than that:

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  kernel/cpu.c          |    2 
>  kernel/stop_machine.c |  134 ++++++++++++++++++--------------------------------
>  2 files changed, 51 insertions(+), 85 deletions(-)
> 
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -254,6 +254,8 @@ static int __ref take_cpu_down(void *_pa
>  		return err;
> 
>  	cpu_notify(CPU_DYING | param->mod, param->hcpu);
> +	/* Park the stopper thread */
> +	kthread_park(current);
>  	return 0;
>  }
> 
> Index: linux-2.6/kernel/stop_machine.c
> ===================================================================
> --- linux-2.6.orig/kernel/stop_machine.c
> +++ linux-2.6/kernel/stop_machine.c
> @@ -18,7 +18,7 @@
>  #include <linux/stop_machine.h>
>  #include <linux/interrupt.h>
>  #include <linux/kallsyms.h>
> -
> +#include <linux/smpboot.h>
>  #include <linux/atomic.h>
> 
>  /*
> @@ -245,20 +245,25 @@ int try_stop_cpus(const struct cpumask *
>  	return ret;
>  }
> 
> -static int cpu_stopper_thread(void *data)
> +static int cpu_stop_should_run(unsigned int cpu)
> +{
> +	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> +	unsigned long flags;
> +	int run;
> +
> +	spin_lock_irqsave(&stopper->lock, flags);
> +	run = !list_empty(&stopper->works);
> +	spin_unlock_irqrestore(&stopper->lock, flags);
> +	return run;
> +}
> +
> +static void cpu_stopper_thread(unsigned int cpu)
>  {
> -	struct cpu_stopper *stopper = data;
> +	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
>  	struct cpu_stop_work *work;
>  	int ret;
> 
>  repeat:
> -	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
> -
> -	if (kthread_should_stop()) {
> -		__set_current_state(TASK_RUNNING);
> -		return 0;
> -	}
> -
>  	work = NULL;
>  	spin_lock_irq(&stopper->lock);
>  	if (!list_empty(&stopper->works)) {
> @@ -274,8 +279,6 @@ repeat:
>  		struct cpu_stop_done *done = work->done;
>  		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
> 
> -		__set_current_state(TASK_RUNNING);
> -
>  		/* cpu stop callbacks are not allowed to sleep */
>  		preempt_disable();
> 
> @@ -291,87 +294,55 @@ repeat:
>  					  ksym_buf), arg);
> 
>  		cpu_stop_signal_done(done, true);
> -	} else
> -		schedule();
> -
> -	goto repeat;
> +		goto repeat;
> +	}
>  }
> 
>  extern void sched_set_stop_task(int cpu, struct task_struct *stop);
> 
> -/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
> -static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
> -					   unsigned long action, void *hcpu)
> +static void cpu_stop_create(unsigned int cpu)
> +{
> +	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
> +}
> +
> +static void cpu_stop_park(unsigned int cpu)
>  {
> -	unsigned int cpu = (unsigned long)hcpu;
>  	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> -	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
> +	struct cpu_stop_work *work;
> +	unsigned long flags;
> 
> -	switch (action & ~CPU_TASKS_FROZEN) {
> -	case CPU_UP_PREPARE:
> -		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
> -		p = kthread_create_on_node(cpu_stopper_thread,
> -					   stopper,
> -					   cpu_to_node(cpu),
> -					   "migration/%d", cpu);
> -		if (IS_ERR(p))
> -			return notifier_from_errno(PTR_ERR(p));
> -		get_task_struct(p);
> -		kthread_bind(p, cpu);
> -		sched_set_stop_task(cpu, p);
> -		per_cpu(cpu_stopper_task, cpu) = p;
> -		break;
> +	/* drain remaining works */

s/works/work/

> +	spin_lock_irqsave(&stopper->lock, flags);
> +	list_for_each_entry(work, &stopper->works, list)
> +		cpu_stop_signal_done(work->done, false);
> +	stopper->enabled = false;
> +	spin_unlock_irqrestore(&stopper->lock, flags);
> +}
> 
> -	case CPU_ONLINE:
> -		/* strictly unnecessary, as first user will wake it */
> -		wake_up_process(p);
> -		/* mark enabled */
> -		spin_lock_irq(&stopper->lock);
> -		stopper->enabled = true;
> -		spin_unlock_irq(&stopper->lock);
> -		break;
> -
> -#ifdef CONFIG_HOTPLUG_CPU
> -	case CPU_UP_CANCELED:
> -	case CPU_POST_DEAD:
> -	{
> -		struct cpu_stop_work *work;
> -
> -		sched_set_stop_task(cpu, NULL);
> -		/* kill the stopper */
> -		kthread_stop(p);
> -		/* drain remaining works */
> -		spin_lock_irq(&stopper->lock);
> -		list_for_each_entry(work, &stopper->works, list)
> -			cpu_stop_signal_done(work->done, false);
> -		stopper->enabled = false;
> -		spin_unlock_irq(&stopper->lock);
> -		/* release the stopper */
> -		put_task_struct(p);
> -		per_cpu(cpu_stopper_task, cpu) = NULL;
> -		break;
> -	}
> -#endif
> -	}
> +static void cpu_stop_unpark(unsigned int cpu)
> +{
> +	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> 
> -	return NOTIFY_OK;
> +	spin_lock_irq(&stopper->lock);
> +	stopper->enabled = true;
> +	spin_unlock_irq(&stopper->lock);
>  }
> 
> -/*
> - * Give it a higher priority so that cpu stopper is available to other
> - * cpu notifiers.  It currently shares the same priority as sched
> - * migration_notifier.
> - */
> -static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
> -	.notifier_call	= cpu_stop_cpu_callback,
> -	.priority	= 10,
> +static struct smp_hotplug_thread cpu_stop_threads = {
> +	.store			= &cpu_stopper_task,
> +	.thread_should_run	= cpu_stop_should_run,
> +	.thread_fn		= cpu_stopper_thread,
> +	.thread_comm		= "migration/%u",
> +	.create			= cpu_stop_create,
> +	.setup			= cpu_stop_unpark,
> +	.park			= cpu_stop_park,
> +	.unpark			= cpu_stop_unpark,
> +	.selfparking		= true,
>  };
> 
>  static int __init cpu_stop_init(void)
>  {
> -	void *bcpu = (void *)(long)smp_processor_id();
>  	unsigned int cpu;
> -	int err;
> 
>  	for_each_possible_cpu(cpu) {
>  		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> @@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
>  		INIT_LIST_HEAD(&stopper->works);
>  	}
> 
> -	/* start one for the boot cpu */
> -	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
> -				    bcpu);
> -	BUG_ON(err != NOTIFY_OK);
> -	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
> -	register_cpu_notifier(&cpu_stop_cpu_notifier);
> -
> +	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
>  	stop_machine_initialized = true;
> -
>  	return 0;
>  }
>  early_initcall(cpu_stop_init);
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 05/40] cpu: Restructure cpu_down code
  2013-01-31 12:11 ` [patch 05/40] cpu: Restructure cpu_down code Thomas Gleixner
@ 2013-02-09  0:49   ` Paul E. McKenney
  2014-10-09 17:05   ` Borislav Petkov
  1 sibling, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:49 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:15PM -0000, Thomas Gleixner wrote:
> Split out into separate functions, so we can convert it to a state machine.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  kernel/cpu.c |   69 ++++++++++++++++++++++++++++++++++++++++-------------------
>  1 file changed, 47 insertions(+), 22 deletions(-)
> 
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -168,6 +168,43 @@ static int cpu_notify(unsigned long val,
>  	return __cpu_notify(val, cpu, -1, NULL);
>  }
> 
> +/* Notifier wrappers for transitioning to state machine */
> +static int notify_prepare(unsigned int cpu)
> +{
> +	int nr_calls = 0;
> +	int ret;
> +
> +	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
> +	if (ret) {
> +		nr_calls--;
> +		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
> +				__func__, cpu);
> +		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
> +	}
> +	return ret;
> +}
> +
> +static int notify_online(unsigned int cpu)
> +{
> +	cpu_notify(CPU_ONLINE, cpu);
> +	return 0;
> +}
> +
> +static int bringup_cpu(unsigned int cpu)
> +{
> +	struct task_struct *idle = idle_thread_get(cpu);
> +	int ret;
> +
> +	/* Arch-specific enabling code. */
> +	ret = __cpu_up(cpu, idle);
> +	if (ret) {
> +		cpu_notify(CPU_UP_CANCELED, cpu);
> +		return ret;
> +	}
> +	BUG_ON(!cpu_online(cpu));
> +	return 0;
> +}
> +
>  #ifdef CONFIG_HOTPLUG_CPU
> 
>  static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
> @@ -340,7 +377,7 @@ EXPORT_SYMBOL(cpu_down);
>  static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
>  {
>  	struct task_struct *idle;
> -	int ret, nr_calls = 0;
> +	int ret;
> 
>  	cpu_hotplug_begin();
> 
> @@ -355,35 +392,23 @@ static int __cpuinit _cpu_up(unsigned in
>  		goto out;
>  	}
> 
> +	cpuhp_tasks_frozen = tasks_frozen;
> +
>  	ret = smpboot_create_threads(cpu);
>  	if (ret)
>  		goto out;
> 
> -	cpuhp_tasks_frozen = tasks_frozen;
> -
> -	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
> -	if (ret) {
> -		nr_calls--;
> -		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
> -				__func__, cpu);
> -		goto out_notify;
> -	}
> +	ret = notify_prepare(cpu);
> +	if (ret)
> +		goto out;
> 
> -	/* Arch-specific enabling code. */
> -	ret = __cpu_up(cpu, idle);
> -	if (ret != 0)
> -		goto out_notify;
> -	BUG_ON(!cpu_online(cpu));
> +	ret = bringup_cpu(cpu);
> +	if (ret)
> +		goto out;
> 
>  	/* Wake the per cpu threads */
>  	smpboot_unpark_threads(cpu);
> -
> -	/* Now call notifier in preparation. */
> -	cpu_notify(CPU_ONLINE, cpu);
> -
> -out_notify:
> -	if (ret != 0)
> -		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
> +	notify_online(cpu);
>  out:
>  	cpu_hotplug_done();
> 
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 04/40] cpu: Restructure FROZEN state handling
  2013-01-31 12:11 ` [patch 04/40] cpu: Restructure FROZEN state handling Thomas Gleixner
@ 2013-02-09  0:52   ` Paul E. McKenney
  2014-10-09 16:53   ` Borislav Petkov
  1 sibling, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:52 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:15PM -0000, Thomas Gleixner wrote:
> There are only a few callbacks which really care about FROZEN
> vs. !FROZEN. No need to have extra states for this. 
> 
> Publish the frozen state in an extra variable which is updated under
> the hotplug lock and let the interested users deal with it w/o
> imposing those extra state checks on everyone.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Getting rid of all the _FROZEN variants of the notifier actions would
be good!

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  kernel/cpu.c |   66 ++++++++++++++++++++++++-----------------------------------
>  1 file changed, 27 insertions(+), 39 deletions(-)
> 
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -25,6 +25,7 @@
>  #ifdef CONFIG_SMP
>  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
>  static DEFINE_MUTEX(cpu_add_remove_lock);
> +static bool cpuhp_tasks_frozen;
> 
>  /*
>   * The following two API's must be used when attempting
> @@ -148,27 +149,30 @@ int __ref register_cpu_notifier(struct n
>  	return ret;
>  }
> 
> -static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
> +static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
>  			int *nr_calls)
>  {
> +	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
> +	void *hcpu = (void *)(long)cpu;
> +
>  	int ret;
> 
> -	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
> +	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
>  					nr_calls);
> 
>  	return notifier_to_errno(ret);
>  }
> 
> -static int cpu_notify(unsigned long val, void *v)
> +static int cpu_notify(unsigned long val, unsigned int cpu)
>  {
> -	return __cpu_notify(val, v, -1, NULL);
> +	return __cpu_notify(val, cpu, -1, NULL);
>  }
> 
>  #ifdef CONFIG_HOTPLUG_CPU
> 
> -static void cpu_notify_nofail(unsigned long val, void *v)
> +static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
>  {
> -	BUG_ON(cpu_notify(val, v));
> +	BUG_ON(cpu_notify(val, cpu));
>  }
>  EXPORT_SYMBOL(register_cpu_notifier);
> 
> @@ -237,23 +241,17 @@ static inline void check_for_tasks(int c
>  	write_unlock_irq(&tasklist_lock);
>  }
> 
> -struct take_cpu_down_param {
> -	unsigned long mod;
> -	void *hcpu;
> -};
> -
>  /* Take this CPU down. */
>  static int __ref take_cpu_down(void *_param)
>  {
> -	struct take_cpu_down_param *param = _param;
> -	int err;
> +	int err, cpu = smp_processor_id();
> 
>  	/* Ensure this CPU doesn't handle any more interrupts. */
>  	err = __cpu_disable();
>  	if (err < 0)
>  		return err;
> 
> -	cpu_notify(CPU_DYING | param->mod, param->hcpu);
> +	cpu_notify(CPU_DYING, cpu);
>  	/* Park the stopper thread */
>  	kthread_park(current);
>  	return 0;
> @@ -263,12 +261,6 @@ static int __ref take_cpu_down(void *_pa
>  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
>  {
>  	int err, nr_calls = 0;
> -	void *hcpu = (void *)(long)cpu;
> -	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
> -	struct take_cpu_down_param tcd_param = {
> -		.mod = mod,
> -		.hcpu = hcpu,
> -	};
> 
>  	if (num_online_cpus() == 1)
>  		return -EBUSY;
> @@ -278,21 +270,23 @@ static int __ref _cpu_down(unsigned int 
> 
>  	cpu_hotplug_begin();
> 
> -	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
> +	cpuhp_tasks_frozen = tasks_frozen;
> +
> +	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
>  	if (err) {
>  		nr_calls--;
> -		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
> +		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
>  		printk("%s: attempt to take down CPU %u failed\n",
>  				__func__, cpu);
>  		goto out_release;
>  	}
>  	smpboot_park_threads(cpu);
> 
> -	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
> +	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
>  	if (err) {
>  		/* CPU didn't die: tell everyone.  Can't complain. */
>  		smpboot_unpark_threads(cpu);
> -		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
> +		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
>  		goto out_release;
>  	}
>  	BUG_ON(cpu_online(cpu));
> @@ -311,14 +305,14 @@ static int __ref _cpu_down(unsigned int 
>  	__cpu_die(cpu);
> 
>  	/* CPU is completely dead: tell everyone.  Too late to complain. */
> -	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
> +	cpu_notify_nofail(CPU_DEAD, cpu);
> 
>  	check_for_tasks(cpu);
> 
>  out_release:
>  	cpu_hotplug_done();
>  	if (!err)
> -		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
> +		cpu_notify_nofail(CPU_POST_DEAD, cpu);
>  	return err;
>  }
> 
> @@ -345,10 +339,8 @@ EXPORT_SYMBOL(cpu_down);
>  /* Requires cpu_add_remove_lock to be held */
>  static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
>  {
> -	int ret, nr_calls = 0;
> -	void *hcpu = (void *)(long)cpu;
> -	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
>  	struct task_struct *idle;
> +	int ret, nr_calls = 0;
> 
>  	cpu_hotplug_begin();
> 
> @@ -367,7 +359,9 @@ static int __cpuinit _cpu_up(unsigned in
>  	if (ret)
>  		goto out;
> 
> -	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
> +	cpuhp_tasks_frozen = tasks_frozen;
> +
> +	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
>  	if (ret) {
>  		nr_calls--;
>  		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
> @@ -385,11 +379,11 @@ static int __cpuinit _cpu_up(unsigned in
>  	smpboot_unpark_threads(cpu);
> 
>  	/* Now call notifier in preparation. */
> -	cpu_notify(CPU_ONLINE | mod, hcpu);
> +	cpu_notify(CPU_ONLINE, cpu);
> 
>  out_notify:
>  	if (ret != 0)
> -		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
> +		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
>  out:
>  	cpu_hotplug_done();
> 
> @@ -627,13 +621,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
>   */
>  void __cpuinit notify_cpu_starting(unsigned int cpu)
>  {
> -	unsigned long val = CPU_STARTING;
> -
> -#ifdef CONFIG_PM_SLEEP_SMP
> -	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
> -		val = CPU_STARTING_FROZEN;
> -#endif /* CONFIG_PM_SLEEP_SMP */
> -	cpu_notify(val, (void *)(long)cpu);
> +	cpu_notify(CPU_STARTING, cpu);
>  }
> 
>  #endif /* CONFIG_SMP */
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
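
For the few callbacks which really do care about the resume path, the
replacement pattern would look like the sketch below. cpuhp_tasks_frozen
is the variable published by this patch (made visible outside
kernel/cpu.c later in the series); the mydrv_* helpers are
hypothetical:

	#include <linux/cpu.h>

	static int mydrv_fast_restore(unsigned int cpu);	/* hypothetical */
	static int mydrv_full_setup(unsigned int cpu);		/* hypothetical */

	static int mydrv_prepare(unsigned int cpu)
	{
		if (cpuhp_tasks_frozen) {
			/* suspend/resume path: tasks are frozen */
			return mydrv_fast_restore(cpu);
		}
		/* regular hotplug path */
		return mydrv_full_setup(cpu);
	}

Since cpuhp_tasks_frozen is only updated under the hotplug lock, the
check is stable for the whole duration of the callback.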

* Re: [patch 06/40] cpu: hotplug: Split out cpu down functions
  2013-01-31 12:11 ` [patch 06/40] cpu: hotplug: Split out cpu down functions Thomas Gleixner
@ 2013-02-09  0:54   ` Paul E. McKenney
  0 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-09  0:54 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:16PM -0000, Thomas Gleixner wrote:
> Split cpu_down into separate functions in preparation for the state machine
> conversion.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  kernel/cpu.c |   83 +++++++++++++++++++++++++++++++++++++----------------------
>  1 file changed, 53 insertions(+), 30 deletions(-)
> 
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -206,11 +206,6 @@ static int bringup_cpu(unsigned int cpu)
>  }
> 
>  #ifdef CONFIG_HOTPLUG_CPU
> -
> -static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
> -{
> -	BUG_ON(cpu_notify(val, cpu));
> -}
>  EXPORT_SYMBOL(register_cpu_notifier);
> 
>  void __ref unregister_cpu_notifier(struct notifier_block *nb)
> @@ -278,6 +273,25 @@ static inline void check_for_tasks(int c
>  	write_unlock_irq(&tasklist_lock);
>  }
> 
> +static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
> +{
> +	BUG_ON(cpu_notify(val, cpu));
> +}
> +
> +static int notify_down_prepare(unsigned int cpu)
> +{
> +	int err, nr_calls = 0;
> +
> +	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
> +	if (err) {
> +		nr_calls--;
> +		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
> +		printk("%s: attempt to take down CPU %u failed\n",
> +				__func__, cpu);
> +	}
> +	return err;
> +}
> +
>  /* Take this CPU down. */
>  static int __ref take_cpu_down(void *_param)
>  {
> @@ -294,37 +308,17 @@ static int __ref take_cpu_down(void *_pa
>  	return 0;
>  }
> 
> -/* Requires cpu_add_remove_lock to be held */
> -static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
> +static int takedown_cpu(unsigned int cpu)
>  {
> -	int err, nr_calls = 0;
> -
> -	if (num_online_cpus() == 1)
> -		return -EBUSY;
> -
> -	if (!cpu_online(cpu))
> -		return -EINVAL;
> -
> -	cpu_hotplug_begin();
> -
> -	cpuhp_tasks_frozen = tasks_frozen;
> +	int err;
> 
> -	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
> -	if (err) {
> -		nr_calls--;
> -		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
> -		printk("%s: attempt to take down CPU %u failed\n",
> -				__func__, cpu);
> -		goto out_release;
> -	}
>  	smpboot_park_threads(cpu);
> -
>  	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
>  	if (err) {
>  		/* CPU didn't die: tell everyone.  Can't complain. */
>  		smpboot_unpark_threads(cpu);
>  		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
> -		goto out_release;
> +		return err;
>  	}
>  	BUG_ON(cpu_online(cpu));
> 
> @@ -341,10 +335,39 @@ static int __ref _cpu_down(unsigned int 
>  	/* This actually kills the CPU. */
>  	__cpu_die(cpu);
> 
> -	/* CPU is completely dead: tell everyone.  Too late to complain. */
> -	cpu_notify_nofail(CPU_DEAD, cpu);
> +	return 0;
> +}
> 
> +static int notify_dead(unsigned int cpu)
> +{
> +	cpu_notify_nofail(CPU_DEAD, cpu);
>  	check_for_tasks(cpu);
> +	return 0;
> +}
> +
> +/* Requires cpu_add_remove_lock to be held */
> +static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
> +{
> +	int err;
> +
> +	if (num_online_cpus() == 1)
> +		return -EBUSY;
> +
> +	if (!cpu_online(cpu))
> +		return -EINVAL;
> +
> +	cpu_hotplug_begin();
> +
> +	cpuhp_tasks_frozen = tasks_frozen;
> +
> +	err = notify_down_prepare(cpu);
> +	if (err)
> +		goto out_release;
> +	err = takedown_cpu(cpu);
> +	if (err)
> +		goto out_release;
> +
> +	notify_dead(cpu);
> 
>  out_release:
>  	cpu_hotplug_done();
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor
  2013-01-31 12:11 ` [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor Thomas Gleixner
@ 2013-02-11 20:09   ` Paul E. McKenney
  0 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-11 20:09 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:17PM -0000, Thomas Gleixner wrote:
> Move the split-out steps into a callback array and let the cpu_up/down
> code iterate through the array functions. For now most of the
> callbacks are asymmetric to resemble the current hotplug maze.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  include/linux/cpu.h        |    4 +
>  include/linux/cpuhotplug.h |   16 ++++
>  init/main.c                |   15 ---
>  kernel/cpu.c               |  180 ++++++++++++++++++++++++++++++++++++---------
>  kernel/smpboot.c           |    6 +
>  kernel/smpboot.h           |    4 -
>  6 files changed, 173 insertions(+), 52 deletions(-)
> 
> Index: linux-2.6/include/linux/cpu.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cpu.h
> +++ linux-2.6/include/linux/cpu.h
> @@ -26,6 +26,9 @@ struct cpu {
>  	struct device dev;
>  };
> 
> +extern void boot_cpu_init(void);
> +extern void boot_cpu_state_init(void);
> +
>  extern int register_cpu(struct cpu *cpu, int num);
>  extern struct device *get_cpu_device(unsigned cpu);
>  extern bool cpu_is_hotpluggable(unsigned cpu);
> @@ -112,6 +115,7 @@ enum {
> 
> 
>  #ifdef CONFIG_SMP
> +extern bool cpuhp_tasks_frozen;
>  /* Need to know about CPUs going up/down? */
>  #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
>  #define cpu_notifier(fn, pri) {					\
> Index: linux-2.6/include/linux/cpuhotplug.h
> ===================================================================
> --- /dev/null
> +++ linux-2.6/include/linux/cpuhotplug.h
> @@ -0,0 +1,16 @@
> +#ifndef __CPUHOTPLUG_H
> +#define __CPUHOTPLUG_H
> +
> +enum cpuhp_states {
> +	CPUHP_OFFLINE,
> +	CPUHP_CREATE_THREADS,
> +	CPUHP_NOTIFY_PREPARE,
> +	CPUHP_NOTIFY_DEAD,
> +	CPUHP_BRINGUP_CPU,
> +	CPUHP_TEARDOWN_CPU,
> +	CPUHP_PERCPU_THREADS,
> +	CPUHP_NOTIFY_ONLINE,
> +	CPUHP_NOTIFY_DOWN_PREPARE,
> +	CPUHP_MAX,
> +};
> +#endif
> Index: linux-2.6/init/main.c
> ===================================================================
> --- linux-2.6.orig/init/main.c
> +++ linux-2.6/init/main.c
> @@ -424,20 +424,6 @@ void __init parse_early_param(void)
>  	done = 1;
>  }
> 
> -/*
> - *	Activate the first processor.
> - */
> -
> -static void __init boot_cpu_init(void)
> -{
> -	int cpu = smp_processor_id();
> -	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
> -	set_cpu_online(cpu, true);
> -	set_cpu_active(cpu, true);
> -	set_cpu_present(cpu, true);
> -	set_cpu_possible(cpu, true);
> -}
> -
>  void __init __weak smp_setup_processor_id(void)
>  {
>  }
> @@ -502,6 +488,7 @@ asmlinkage void __init start_kernel(void
>  	setup_command_line(command_line);
>  	setup_nr_cpu_ids();
>  	setup_per_cpu_areas();
> +	boot_cpu_state_init();
>  	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
> 
>  	build_all_zonelists(NULL, NULL);
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -19,13 +19,24 @@
>  #include <linux/mutex.h>
>  #include <linux/gfp.h>
>  #include <linux/suspend.h>
> +#include <linux/cpuhotplug.h>
> 
>  #include "smpboot.h"
> 
> +/* CPU state */
> +static DEFINE_PER_CPU(enum cpuhp_states, cpuhp_state);
> +
> +struct cpuhp_step {
> +	int (*startup)(unsigned int cpu);
> +	int (*teardown)(unsigned int cpu);
> +};
> +
> +static struct cpuhp_step cpuhp_bp_states[];
> +
>  #ifdef CONFIG_SMP
>  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
>  static DEFINE_MUTEX(cpu_add_remove_lock);
> -static bool cpuhp_tasks_frozen;
> +bool cpuhp_tasks_frozen;
> 
>  /*
>   * The following two API's must be used when attempting
> @@ -310,13 +321,10 @@ static int __ref take_cpu_down(void *_pa
> 
>  static int takedown_cpu(unsigned int cpu)
>  {
> -	int err;
> +	int err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
> 
> -	smpboot_park_threads(cpu);
> -	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
>  	if (err) {
>  		/* CPU didn't die: tell everyone.  Can't complain. */
> -		smpboot_unpark_threads(cpu);
>  		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
>  		return err;
>  	}
> @@ -345,10 +353,32 @@ static int notify_dead(unsigned int cpu)
>  	return 0;
>  }
> 
> +#else
> +#define notify_down_prepare	NULL
> +#define takedown_cpu		NULL
> +#define notify_dead		NULL
> +#endif
> +
> +#ifdef CONFIG_HOTPLUG_CPU
> +static void undo_cpu_down(unsigned int cpu, int step)
> +{
> +	while (step++ < CPUHP_MAX) {
> +		/*
> +		 * Transitional check. Will be removed when we have a
> +		 * fully symmetric mechanism
> +		 */
> +		if (!cpuhp_bp_states[step].teardown)
> +			continue;
> +
> +		if (cpuhp_bp_states[step].startup)
> +			cpuhp_bp_states[step].startup(cpu);
> +	}
> +}
> +
>  /* Requires cpu_add_remove_lock to be held */
>  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
>  {
> -	int err;
> +	int ret = 0, step;
> 
>  	if (num_online_cpus() == 1)
>  		return -EBUSY;
> @@ -360,20 +390,23 @@ static int __ref _cpu_down(unsigned int 
> 
>  	cpuhp_tasks_frozen = tasks_frozen;
> 
> -	err = notify_down_prepare(cpu);
> -	if (err)
> -		goto out_release;
> -	err = takedown_cpu(cpu);
> -	if (err)
> -		goto out_release;
> -
> -	notify_dead(cpu);
> +	for (step = per_cpu(cpuhp_state, cpu); step > 0; step--) {
> +		if (cpuhp_bp_states[step].teardown) {
> +			ret = cpuhp_bp_states[step].teardown(cpu);
> +			if (ret) {
> +				undo_cpu_down(cpu, step + 1);
> +				step = CPUHP_MAX;
> +				break;
> +			}
> +		}
> +	}
> +	/* Store the current cpu state */
> +	per_cpu(cpuhp_state, cpu) = step;
> 
> -out_release:
>  	cpu_hotplug_done();
> -	if (!err)
> +	if (!ret)
>  		cpu_notify_nofail(CPU_POST_DEAD, cpu);
> -	return err;
> +	return ret;
>  }
> 
>  int __ref cpu_down(unsigned int cpu)
> @@ -396,11 +429,25 @@ out:
>  EXPORT_SYMBOL(cpu_down);
>  #endif /*CONFIG_HOTPLUG_CPU*/
> 
> +static void undo_cpu_up(unsigned int cpu, int step)
> +{
> +	while (step--) {
> +		/*
> +		 * Transitional check. Will be removed when we have a
> +		 * fully symmetric mechanism
> +		 */
> +		if (!cpuhp_bp_states[step].startup)
> +			continue;
> +		if (cpuhp_bp_states[step].teardown)
> +			cpuhp_bp_states[step].teardown(cpu);
> +	}
> +}
> +
>  /* Requires cpu_add_remove_lock to be held */
>  static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
>  {
> +	int ret = 0, step;
>  	struct task_struct *idle;
> -	int ret;
> 
>  	cpu_hotplug_begin();
> 
> @@ -409,6 +456,7 @@ static int __cpuinit _cpu_up(unsigned in
>  		goto out;
>  	}
> 
> +	/* Let it fail before we try to bring the cpu up */
>  	idle = idle_thread_get(cpu);
>  	if (IS_ERR(idle)) {
>  		ret = PTR_ERR(idle);
> @@ -417,24 +465,20 @@ static int __cpuinit _cpu_up(unsigned in
> 
>  	cpuhp_tasks_frozen = tasks_frozen;
> 
> -	ret = smpboot_create_threads(cpu);
> -	if (ret)
> -		goto out;
> -
> -	ret = notify_prepare(cpu);
> -	if (ret)
> -		goto out;
> -
> -	ret = bringup_cpu(cpu);
> -	if (ret)
> -		goto out;
> -
> -	/* Wake the per cpu threads */
> -	smpboot_unpark_threads(cpu);
> -	notify_online(cpu);
> +	for (step = per_cpu(cpuhp_state, cpu); step < CPUHP_MAX; step++) {
> +		if (cpuhp_bp_states[step].startup) {
> +			ret = cpuhp_bp_states[step].startup(cpu);
> +			if (ret) {
> +				undo_cpu_up(cpu, step - 1);
> +				step = 0;
> +				break;
> +			}
> +		}
> +	}
> +	/* Store the current cpu state */
> +	per_cpu(cpuhp_state, cpu) = step;
>  out:
>  	cpu_hotplug_done();
> -
>  	return ret;
>  }
> 
> @@ -674,6 +718,52 @@ void __cpuinit notify_cpu_starting(unsig
> 
>  #endif /* CONFIG_SMP */
> 
> +/* Boot processor state steps */
> +static struct cpuhp_step cpuhp_bp_states[] = {
> +	[CPUHP_OFFLINE] = {
> +		.startup = NULL,
> +		.teardown = NULL,
> +	},
> +#ifdef CONFIG_SMP
> +	[CPUHP_CREATE_THREADS] = {
> +		.startup = smpboot_create_threads,
> +		.teardown = NULL,
> +	},
> +	[CPUHP_NOTIFY_PREPARE] = {
> +		.startup = notify_prepare,
> +		.teardown = NULL,
> +	},
> +	[CPUHP_NOTIFY_DEAD] = {
> +		.startup = NULL,
> +		.teardown = notify_dead,
> +	},
> +	[CPUHP_BRINGUP_CPU] = {
> +		.startup = bringup_cpu,
> +		.teardown = NULL,
> +	},
> +	[CPUHP_TEARDOWN_CPU] = {
> +		.startup = NULL,
> +		.teardown = takedown_cpu,
> +	},
> +	[CPUHP_PERCPU_THREADS] = {
> +		.startup = smpboot_unpark_threads,
> +		.teardown = smpboot_park_threads,
> +	},
> +	[CPUHP_NOTIFY_ONLINE] = {
> +		.startup = notify_online,
> +		.teardown = NULL,
> +	},
> +	[CPUHP_NOTIFY_DOWN_PREPARE] = {
> +		.startup = NULL,
> +		.teardown = notify_down_prepare,
> +	},
> +#endif
> +	[CPUHP_MAX] = {
> +		.startup = NULL,
> +		.teardown = NULL,
> +	},
> +};
> +
>  /*
>   * cpu_bit_bitmap[] is a special, "compressed" data structure that
>   * represents all NR_CPUS bits binary values of 1<<nr.
> @@ -769,3 +859,25 @@ void init_cpu_online(const struct cpumas
>  {
>  	cpumask_copy(to_cpumask(cpu_online_bits), src);
>  }
> +
> +/*
> + * Activate the first processor.
> + */
> +void __init boot_cpu_init(void)
> +{
> +	int cpu = smp_processor_id();
> +
> +	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
> +	set_cpu_online(cpu, true);
> +	set_cpu_active(cpu, true);
> +	set_cpu_present(cpu, true);
> +	set_cpu_possible(cpu, true);
> +}
> +
> +/*
> + * Must be called _AFTER_ setting up the per_cpu areas
> + */
> +void __init boot_cpu_state_init(void)
> +{
> +	per_cpu(cpuhp_state, smp_processor_id()) = CPUHP_MAX;
> +}
> Index: linux-2.6/kernel/smpboot.c
> ===================================================================
> --- linux-2.6.orig/kernel/smpboot.c
> +++ linux-2.6/kernel/smpboot.c
> @@ -212,7 +212,7 @@ static void smpboot_unpark_thread(struct
>  	kthread_unpark(tsk);
>  }
> 
> -void smpboot_unpark_threads(unsigned int cpu)
> +int smpboot_unpark_threads(unsigned int cpu)
>  {
>  	struct smp_hotplug_thread *cur;
> 
> @@ -220,6 +220,7 @@ void smpboot_unpark_threads(unsigned int
>  	list_for_each_entry(cur, &hotplug_threads, list)
>  		smpboot_unpark_thread(cur, cpu);
>  	mutex_unlock(&smpboot_threads_lock);
> +	return 0;
>  }
> 
>  static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
> @@ -230,7 +231,7 @@ static void smpboot_park_thread(struct s
>  		kthread_park(tsk);
>  }
> 
> -void smpboot_park_threads(unsigned int cpu)
> +int smpboot_park_threads(unsigned int cpu)
>  {
>  	struct smp_hotplug_thread *cur;
> 
> @@ -238,6 +239,7 @@ void smpboot_park_threads(unsigned int c
>  	list_for_each_entry_reverse(cur, &hotplug_threads, list)
>  		smpboot_park_thread(cur, cpu);
>  	mutex_unlock(&smpboot_threads_lock);
> +	return 0;
>  }
> 
>  static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
> Index: linux-2.6/kernel/smpboot.h
> ===================================================================
> --- linux-2.6.orig/kernel/smpboot.h
> +++ linux-2.6/kernel/smpboot.h
> @@ -14,7 +14,7 @@ static inline void idle_threads_init(voi
>  #endif
> 
>  int smpboot_create_threads(unsigned int cpu);
> -void smpboot_park_threads(unsigned int cpu);
> -void smpboot_unpark_threads(unsigned int cpu);
> +int smpboot_park_threads(unsigned int cpu);
> +int smpboot_unpark_threads(unsigned int cpu);
> 
>  #endif
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
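
The error handling in the new _cpu_up() deserves spelling out: bringup
walks the state array upwards, a failing startup callback triggers
undo_cpu_up() to run the teardown callbacks of the already completed
steps, and the stored state falls back to CPUHP_OFFLINE. Condensed from
the hunk above into one helper (the cpuhp_up_walk name is invented for
this rendition):

	static int cpuhp_up_walk(unsigned int cpu)
	{
		int ret = 0, step;

		for (step = per_cpu(cpuhp_state, cpu); step < CPUHP_MAX; step++) {
			if (!cpuhp_bp_states[step].startup)
				continue;
			ret = cpuhp_bp_states[step].startup(cpu);
			if (ret) {
				/* unwind the already completed steps */
				undo_cpu_up(cpu, step - 1);
				step = 0;	/* back to CPUHP_OFFLINE */
				break;
			}
		}
		/* persist where we ended up */
		per_cpu(cpuhp_state, cpu) = step;
		return ret;
	}

_cpu_down() is the mirror image: it walks downwards from the stored
state and undo_cpu_down() re-runs the startup callbacks on failure.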

* Re: [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine
  2013-01-31 12:11 ` [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine Thomas Gleixner
@ 2013-02-11 20:17   ` Paul E. McKenney
  0 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-11 20:17 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:18PM -0000, Thomas Gleixner wrote:
> Move the functions which need to run on the hotplugged processor into
> a state machine array and let the code iterate through these functions.
> 
> In a later stage, this will grow synchronization points between the
> control processor and the hotplugged processor, so we can move the
> various architecture implementations of the synchronizations to the
> core.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  include/linux/cpuhotplug.h |    4 ++
>  kernel/cpu.c               |   70 +++++++++++++++++++++++++++++++++++----------
>  2 files changed, 59 insertions(+), 15 deletions(-)
> 
> Index: linux-2.6/include/linux/cpuhotplug.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cpuhotplug.h
> +++ linux-2.6/include/linux/cpuhotplug.h
> @@ -7,6 +7,10 @@ enum cpuhp_states {
>  	CPUHP_NOTIFY_PREPARE,
>  	CPUHP_NOTIFY_DEAD,
>  	CPUHP_BRINGUP_CPU,
> +	CPUHP_AP_OFFLINE,
> +	CPUHP_AP_NOTIFY_STARTING,
> +	CPUHP_AP_NOTIFY_DYING,
> +	CPUHP_AP_MAX,
>  	CPUHP_TEARDOWN_CPU,
>  	CPUHP_PERCPU_THREADS,
>  	CPUHP_NOTIFY_ONLINE,
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -32,6 +32,7 @@ struct cpuhp_step {
>  };
> 
>  static struct cpuhp_step cpuhp_bp_states[];
> +static struct cpuhp_step cpuhp_ap_states[];
> 
>  #ifdef CONFIG_SMP
>  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
> @@ -216,6 +217,12 @@ static int bringup_cpu(unsigned int cpu)
>  	return 0;
>  }
> 
> +static int notify_starting(unsigned int cpu)
> +{
> +	cpu_notify(CPU_STARTING, cpu);
> +	return 0;
> +}
> +
>  #ifdef CONFIG_HOTPLUG_CPU
>  EXPORT_SYMBOL(register_cpu_notifier);
> 
> @@ -303,17 +310,26 @@ static int notify_down_prepare(unsigned 
>  	return err;
>  }
> 
> +static int notify_dying(unsigned int cpu)
> +{
> +	cpu_notify(CPU_DYING, cpu);
> +	return 0;
> +}
> +
>  /* Take this CPU down. */
>  static int __ref take_cpu_down(void *_param)
>  {
> -	int err, cpu = smp_processor_id();
> +	int step, err, cpu = smp_processor_id();
> 
>  	/* Ensure this CPU doesn't handle any more interrupts. */
>  	err = __cpu_disable();
>  	if (err < 0)
>  		return err;
> 
> -	cpu_notify(CPU_DYING, cpu);
> +	for (step = CPUHP_AP_MAX; step >= CPUHP_AP_OFFLINE; step--) {
> +		if (cpuhp_ap_states[step].teardown)
> +			cpuhp_ap_states[step].teardown(cpu);
> +	}
>  	/* Park the stopper thread */
>  	kthread_park(current);
>  	return 0;
> @@ -357,6 +373,7 @@ static int notify_dead(unsigned int cpu)
>  #define notify_down_prepare	NULL
>  #define takedown_cpu		NULL
>  #define notify_dead		NULL
> +#define notify_dying		NULL
>  #endif
> 
>  #ifdef CONFIG_HOTPLUG_CPU
> @@ -429,6 +446,24 @@ out:
>  EXPORT_SYMBOL(cpu_down);
>  #endif /*CONFIG_HOTPLUG_CPU*/
> 
> +/**
> + * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
> + * @cpu: cpu that just started
> + *
> + * This function calls the cpu_chain notifiers with CPU_STARTING.
> + * It must be called by the arch code on the new cpu, before the new cpu
> + * enables interrupts and before the "boot" cpu returns from __cpu_up().
> + */
> +void notify_cpu_starting(unsigned int cpu)
> +{
> +	int step;
> +
> +	for (step = CPUHP_AP_OFFLINE; step <  CPUHP_AP_MAX; step++) {
> +		if (cpuhp_ap_states[step].startup)
> +			cpuhp_ap_states[step].startup(cpu);
> +	}
> +}
> +
>  static void undo_cpu_up(unsigned int cpu, int step)
>  {
>  	while (step--) {
> @@ -703,19 +738,6 @@ core_initcall(cpu_hotplug_pm_sync_init);
> 
>  #endif /* CONFIG_PM_SLEEP_SMP */
> 
> -/**
> - * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
> - * @cpu: cpu that just started
> - *
> - * This function calls the cpu_chain notifiers with CPU_STARTING.
> - * It must be called by the arch code on the new cpu, before the new cpu
> - * enables interrupts and before the "boot" cpu returns from __cpu_up().
> - */
> -void __cpuinit notify_cpu_starting(unsigned int cpu)
> -{
> -	cpu_notify(CPU_STARTING, cpu);
> -}
> -
>  #endif /* CONFIG_SMP */
> 
>  /* Boot processor state steps */
> @@ -764,6 +786,24 @@ static struct cpuhp_step cpuhp_bp_states
>  	},
>  };
> 
> +/* Application processor state steps */
> +static struct cpuhp_step cpuhp_ap_states[] = {
> +#ifdef CONFIG_SMP
> +	[CPUHP_AP_NOTIFY_STARTING] = {
> +		.startup = notify_starting,
> +		.teardown = NULL,
> +	},
> +	[CPUHP_AP_NOTIFY_DYING] = {
> +		.startup = NULL,
> +		.teardown = notify_dying,
> +	},
> +#endif
> +	[CPUHP_MAX] = {
> +		.startup = NULL,
> +		.teardown = NULL,
> +	},
> +};
> +
>  /*
>   * cpu_bit_bitmap[] is a special, "compressed" data structure that
>   * represents all NR_CPUS bits binary values of 1<<nr.
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
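
With this patch both directions of the application processor walk sit
side by side: notify_cpu_starting() iterates the AP array upwards on
the incoming cpu, while take_cpu_down() iterates it downwards on the
dying one inside stop_machine(); both run with interrupts disabled on
the affected cpu. Condensed from the hunks above (the wrapper function
names are invented for this rendition):

	static void cpuhp_ap_bringup_walk(unsigned int cpu)
	{
		int step;

		/* incoming cpu, called from the arch bringup code */
		for (step = CPUHP_AP_OFFLINE; step < CPUHP_AP_MAX; step++)
			if (cpuhp_ap_states[step].startup)
				cpuhp_ap_states[step].startup(cpu);
	}

	static void cpuhp_ap_teardown_walk(unsigned int cpu)
	{
		int step;

		/* dying cpu, after __cpu_disable(), reverse order */
		for (step = CPUHP_AP_MAX; step >= CPUHP_AP_OFFLINE; step--)
			if (cpuhp_ap_states[step].teardown)
				cpuhp_ap_states[step].teardown(cpu);
	}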

* Re: [patch 10/40] sched: Convert to state machine callbacks
  2013-01-31 12:11 ` [patch 10/40] sched: Convert to state machine callbacks Thomas Gleixner
@ 2013-02-11 23:46   ` Paul E. McKenney
  0 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-11 23:46 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:19PM -0000, Thomas Gleixner wrote:
> The scheduler sports quite a bunch of hotplug notifiers. One reason
> for multiple notifiers is that the startup and teardown
> processes are asymmetric. The scheduler wants to be called early on
> startup and late on teardown, which requires installing two different
> notifiers for the same issue.
> 
> With the state machine implementation we can register a callback pair
> for startup and teardown at the appropriate spot.
> 
> This patch converts the notifiers which are set up with special
> priorities and combines the CPU_PRI_SCHED and CPU_PRI_CPUSET notifiers
> into a single callback. They run back to back anyway and we can make
> sure in the callbacks that the ordering inside the scheduler is
> correct. These notifiers are installed in sched_init_smp() as we can't
> run them during the bringup of the non-boot cpus because the smp
> scheduler is set up after that. It would be nice if we could just
> compile them in, but that needs larger surgery to the scheduler code
> and is beyond the scope of this patch.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

> ---
>  include/linux/cpu.h        |   16 ----
>  include/linux/cpuhotplug.h |    6 +
>  kernel/cpu.c               |    4 +
>  kernel/sched/core.c        |  154 +++++++++++++++++----------------------------
>  4 files changed, 69 insertions(+), 111 deletions(-)
> 
> Index: linux-2.6/include/linux/cpu.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cpu.h
> +++ linux-2.6/include/linux/cpu.h
> @@ -58,22 +58,6 @@ extern ssize_t arch_print_cpu_modalias(s
>   * CPU notifier priorities.
>   */
>  enum {
> -	/*
> -	 * SCHED_ACTIVE marks a cpu which is coming up active during
> -	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
> -	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
> -	 * cpu_active mask right after SCHED_ACTIVE.  During
> -	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
> -	 * ordered in the similar way.
> -	 *
> -	 * This ordering guarantees consistent cpu_active mask and
> -	 * migration behavior to all cpu notifiers.
> -	 */
> -	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
> -	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
> -	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
> -	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
> -
>  	/* migration should happen before other stuff but after perf */
>  	CPU_PRI_PERF		= 20,
>  	CPU_PRI_MIGRATION	= 10,
> Index: linux-2.6/include/linux/cpuhotplug.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cpuhotplug.h
> +++ linux-2.6/include/linux/cpuhotplug.h
> @@ -6,13 +6,16 @@ enum cpuhp_states {
>  	CPUHP_CREATE_THREADS,
>  	CPUHP_NOTIFY_PREPARE,
>  	CPUHP_NOTIFY_DEAD,
> +	CPUHP_SCHED_DEAD,
>  	CPUHP_BRINGUP_CPU,
>  	CPUHP_AP_OFFLINE,
> +	CPUHP_AP_SCHED_STARTING,
>  	CPUHP_AP_NOTIFY_STARTING,
>  	CPUHP_AP_NOTIFY_DYING,
>  	CPUHP_AP_MAX,
>  	CPUHP_TEARDOWN_CPU,
>  	CPUHP_PERCPU_THREADS,
> +	CPUHP_SCHED_ONLINE,
>  	CPUHP_NOTIFY_ONLINE,
>  	CPUHP_NOTIFY_DOWN_PREPARE,
>  	CPUHP_MAX,
> @@ -87,4 +90,7 @@ static inline void cpuhp_remove_state_no
>  	__cpuhp_remove_state(state, false);
>  }
> 
> +/* Compiled in scheduler hotplug functions */
> +int sched_starting_cpu(unsigned int cpu);
> +
>  #endif
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -788,6 +788,10 @@ static struct cpuhp_step cpuhp_bp_states
>  /* Application processor state steps */
>  static struct cpuhp_step cpuhp_ap_states[] = {
>  #ifdef CONFIG_SMP
> +	[CPUHP_AP_SCHED_STARTING] = {
> +		.startup = sched_starting_cpu,
> +		.teardown = NULL,
> +	},
>  	[CPUHP_AP_NOTIFY_STARTING] = {
>  		.startup = notify_starting,
>  		.teardown = NULL,
> Index: linux-2.6/kernel/sched/core.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched/core.c
> +++ linux-2.6/kernel/sched/core.c
> @@ -5167,31 +5167,6 @@ static struct notifier_block __cpuinitda
>  	.priority = CPU_PRI_MIGRATION,
>  };
> 
> -static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
> -				      unsigned long action, void *hcpu)
> -{
> -	switch (action & ~CPU_TASKS_FROZEN) {
> -	case CPU_STARTING:
> -	case CPU_DOWN_FAILED:
> -		set_cpu_active((long)hcpu, true);
> -		return NOTIFY_OK;
> -	default:
> -		return NOTIFY_DONE;
> -	}
> -}
> -
> -static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
> -					unsigned long action, void *hcpu)
> -{
> -	switch (action & ~CPU_TASKS_FROZEN) {
> -	case CPU_DOWN_PREPARE:
> -		set_cpu_active((long)hcpu, false);
> -		return NOTIFY_OK;
> -	default:
> -		return NOTIFY_DONE;
> -	}
> -}
> -
>  static int __init migration_init(void)
>  {
>  	void *cpu = (void *)(long)smp_processor_id();
> @@ -5203,10 +5178,6 @@ static int __init migration_init(void)
>  	migration_call(&migration_notifier, CPU_ONLINE, cpu);
>  	register_cpu_notifier(&migration_notifier);
> 
> -	/* Register cpu active notifiers */
> -	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
> -	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
> -
>  	return 0;
>  }
>  early_initcall(migration_init);
> @@ -6292,42 +6263,12 @@ static void sched_domains_numa_masks_cle
>  	}
>  }
> 
> -/*
> - * Update sched_domains_numa_masks[level][node] array when new cpus
> - * are onlined.
> - */
> -static int sched_domains_numa_masks_update(struct notifier_block *nfb,
> -					   unsigned long action,
> -					   void *hcpu)
> -{
> -	int cpu = (long)hcpu;
> -
> -	switch (action & ~CPU_TASKS_FROZEN) {
> -	case CPU_ONLINE:
> -		sched_domains_numa_masks_set(cpu);
> -		break;
> -
> -	case CPU_DEAD:
> -		sched_domains_numa_masks_clear(cpu);
> -		break;
> -
> -	default:
> -		return NOTIFY_DONE;
> -	}
> -
> -	return NOTIFY_OK;
> -}
>  #else
> -static inline void sched_init_numa(void)
> -{
> -}
> -
> -static int sched_domains_numa_masks_update(struct notifier_block *nfb,
> -					   unsigned long action,
> -					   void *hcpu)
> -{
> -	return 0;
> -}
> +static inline void sched_init_numa(void) { }
> +#ifdef CONFIG_HOTPLUG_CPU
> +static void sched_domains_numa_masks_set(int cpu) { }
> +static void sched_domains_numa_masks_clear(int cpu) { }
> +#endif
>  #endif /* CONFIG_NUMA */
> 
>  static int __sdt_alloc(const struct cpumask *cpu_map)
> @@ -6696,6 +6637,7 @@ match2:
>  	mutex_unlock(&sched_domains_mutex);
>  }
> 
> +#ifdef CONFIG_HOTPLUG_CPU
>  static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
> 
>  /*
> @@ -6706,13 +6648,9 @@ static int num_cpus_frozen;	/* used to m
>   * If we come here as part of a suspend/resume, don't touch cpusets because we
>   * want to restore it back to its original state upon resume anyway.
>   */
> -static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
> -			     void *hcpu)
> +static void cpuset_cpu_active(void)
>  {
> -	switch (action) {
> -	case CPU_ONLINE_FROZEN:
> -	case CPU_DOWN_FAILED_FROZEN:
> -
> +	if (cpuhp_tasks_frozen) {
>  		/*
>  		 * num_cpus_frozen tracks how many CPUs are involved in suspend
>  		 * resume sequence. As long as this is not the last online
> @@ -6722,40 +6660,62 @@ static int cpuset_cpu_active(struct noti
>  		num_cpus_frozen--;
>  		if (likely(num_cpus_frozen)) {
>  			partition_sched_domains(1, NULL, NULL);
> -			break;
> +			return;
>  		}
> -
>  		/*
>  		 * This is the last CPU online operation. So fall through and
>  		 * restore the original sched domains by considering the
>  		 * cpuset configurations.
>  		 */
> -
> -	case CPU_ONLINE:
> -	case CPU_DOWN_FAILED:
> -		cpuset_update_active_cpus(true);
> -		break;
> -	default:
> -		return NOTIFY_DONE;
>  	}
> -	return NOTIFY_OK;
> +	cpuset_update_active_cpus(true);
>  }
> 
> -static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
> -			       void *hcpu)
> +static void cpuset_cpu_inactive(void)
>  {
> -	switch (action) {
> -	case CPU_DOWN_PREPARE:
> -		cpuset_update_active_cpus(false);
> -		break;
> -	case CPU_DOWN_PREPARE_FROZEN:
> +	if (cpuhp_tasks_frozen) {
>  		num_cpus_frozen++;
>  		partition_sched_domains(1, NULL, NULL);
> -		break;
> -	default:
> -		return NOTIFY_DONE;
> -	}
> -	return NOTIFY_OK;
> +	} else
> +		cpuset_update_active_cpus(false);
> +}
> +
> +static int sched_dead_cpu(unsigned int cpu)
> +{
> +	sched_domains_numa_masks_clear(cpu);
> +	return 0;
> +}
> +
> +static int sched_online_cpu(unsigned int cpu)
> +{
> +	/* Looks redundant, but we need it in case of down canceled */
> +	set_cpu_active(cpu, true);
> +	/*
> +	 * Asymmetric to sched_dead_cpu, but this just fiddles with
> +	 * bits. Sigh.
> +	 */
> +	sched_domains_numa_masks_set(cpu);
> +	/* This is actually symmetric */
> +	cpuset_cpu_active();
> +	return 0;
> +}
> +
> +static int sched_offline_cpu(unsigned int cpu)
> +{
> +	set_cpu_active(cpu, false);
> +	cpuset_cpu_inactive();
> +	return 0;
> +}
> +#else
> +#define sched_dead_cpu		NULL
> +#define sched_online_cpu	NULL
> +#define sched_offline_cpu	NULL
> +#endif
> +
> +int __cpuinit sched_starting_cpu(unsigned int cpu)
> +{
> +	set_cpu_active(cpu, true);
> +	return 0;
>  }
> 
>  void __init sched_init_smp(void)
> @@ -6776,9 +6736,13 @@ void __init sched_init_smp(void)
>  	mutex_unlock(&sched_domains_mutex);
>  	put_online_cpus();
> 
> -	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
> -	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
> -	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
> +	/*
> +	 * Note: These callbacks are installed late because we init
> +	 * numa and sched domains after we brought up the cpus.
> +	 */
> +	cpuhp_setup_state_nocalls(CPUHP_SCHED_DEAD, NULL, sched_dead_cpu);
> +	cpuhp_setup_state_nocalls(CPUHP_SCHED_ONLINE, sched_online_cpu,
> +				  sched_offline_cpu);
> 
>  	/* RT runtime code needs to handle some hotplug events */
>  	hotcpu_notifier(update_runtime, 0);
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
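
The conversion pattern in the patch above generalizes to any subsystem: instead of a priority-ordered notifier multiplexing on the action code, register one startup/teardown pair at a fixed slot in enum cpuhp_states. Here is a sketch with a hypothetical "foo" subsystem and slot name, assuming the cpuhp_setup_state_nocalls() interface from earlier in the series (the _nocalls variant registers the callbacks without invoking them on already-online CPUs):

static int foo_online_cpu(unsigned int cpu)
{
	/* replaces the CPU_ONLINE and CPU_DOWN_FAILED notifier cases */
	return 0;
}

static int foo_offline_cpu(unsigned int cpu)
{
	/* replaces the CPU_DOWN_PREPARE notifier case */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * Ordering now comes from the position of CPUHP_FOO_ONLINE in
	 * enum cpuhp_states, not from notifier priorities.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_FOO_ONLINE,
					 foo_online_cpu, foo_offline_cpu);
}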

* Re: [patch 32/40] rcu: Convert rcutree to hotplug state machine
  2013-01-31 12:11 ` [patch 32/40] rcu: Convert rcutree " Thomas Gleixner
@ 2013-02-12  0:01   ` Paul E. McKenney
  2013-02-12 15:50     ` Paul E. McKenney
  0 siblings, 1 reply; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-12  0:01 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Thu, Jan 31, 2013 at 12:11:38PM -0000, Thomas Gleixner wrote:
> Do we really need so many states here ?

Well, all that RCU does for CPU_DYING is to do tracing, which could be
ditched.  Required changes called out inline below.

All that the CPU_ONLINE and CPU_DOWN_PREPARE notifiers do is set
up affinity for the RCU-boost kthreads.  These are unfortunately not
per-CPU kthreads, but perhaps something similar could be set up.  This is
strictly a performance optimization, so the CPU_ONLINE notifier could
be replaced by having the kthread check which of its CPUs was online.
Unfortunately, the same is not true of CPU_DOWN_PREPARE because if the
kthread was too slow about it, the scheduler would get annoyed about a
kthread being runnable only on offlined CPUs.

It is not clear that this is worthwhile.  Thoughts on other ways to
get this done?

							Thanx, Paul

> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>  include/linux/cpuhotplug.h |   18 ++++++++
>  kernel/cpu.c               |   12 +++++
>  kernel/rcutree.c           |   95 ++++++++++++++++++++-------------------------
>  3 files changed, 73 insertions(+), 52 deletions(-)
> 
> Index: linux-2.6/include/linux/cpuhotplug.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cpuhotplug.h
> +++ linux-2.6/include/linux/cpuhotplug.h
> @@ -12,6 +12,7 @@ enum cpuhp_states {
>  	CPUHP_PERF_PREPARE,
>  	CPUHP_SCHED_MIGRATE_PREP,
>  	CPUHP_WORKQUEUE_PREP,
> +	CPUHP_RCUTREE_PREPARE,
>  	CPUHP_NOTIFY_PREPARE,
>  	CPUHP_NOTIFY_DEAD,
>  	CPUHP_CPUFREQ_DEAD,
> @@ -27,6 +28,7 @@ enum cpuhp_states {
>  	CPUHP_AP_ARM64_TIMER_STARTING,
>  	CPUHP_AP_KVM_STARTING,
>  	CPUHP_AP_NOTIFY_DYING,
> +	CPUHP_AP_RCUTREE_DYING,

Drop this.

>  	CPUHP_AP_X86_TBOOT_DYING,
>  	CPUHP_AP_S390_VTIME_DYING,
>  	CPUHP_AP_SCHED_NOHZ_DYING,
> @@ -39,6 +41,7 @@ enum cpuhp_states {
>  	CPUHP_SCHED_MIGRATE_ONLINE,
>  	CPUHP_WORKQUEUE_ONLINE,
>  	CPUHP_CPUFREQ_ONLINE,
> +	CPUHP_RCUTREE_ONLINE,
>  	CPUHP_NOTIFY_ONLINE,
>  	CPUHP_NOTIFY_DOWN_PREPARE,
>  	CPUHP_PERF_X86_UNCORE_ONLINE,
> @@ -147,4 +150,19 @@ int workqueue_prepare_cpu(unsigned int c
>  int workqueue_online_cpu(unsigned int cpu);
>  int workqueue_offline_cpu(unsigned int cpu);
> 
> +/* RCUtree hotplug events */
> +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
> +int rcutree_prepare_cpu(unsigned int cpu);
> +int rcutree_online_cpu(unsigned int cpu);
> +int rcutree_offline_cpu(unsigned int cpu);
> +int rcutree_dead_cpu(unsigned int cpu);
> +int rcutree_dying_cpu(unsigned int cpu);

And this...

> +#else
> +#define rcutree_prepare_cpu	NULL
> +#define rcutree_online_cpu	NULL
> +#define rcutree_offline_cpu	NULL
> +#define rcutree_dead_cpu	NULL
> +#define rcutree_dying_cpu	NULL

And of course this.

> +#endif
> +
>  #endif
> Index: linux-2.6/kernel/cpu.c
> ===================================================================
> --- linux-2.6.orig/kernel/cpu.c
> +++ linux-2.6/kernel/cpu.c
> @@ -755,6 +755,10 @@ static struct cpuhp_step cpuhp_bp_states
>  		.startup = workqueue_prepare_cpu,
>  		.teardown = NULL,
>  	},
> +	[CPUHP_RCUTREE_PREPARE] = {
> +		.startup = rcutree_prepare_cpu,
> +		.teardown = rcutree_dead_cpu,
> +	},
>  	[CPUHP_NOTIFY_PREPARE] = {
>  		.startup = notify_prepare,
>  		.teardown = NULL,
> @@ -787,6 +791,10 @@ static struct cpuhp_step cpuhp_bp_states
>  		.startup = workqueue_online_cpu,
>  		.teardown = workqueue_offline_cpu,
>  	},
> +	[CPUHP_RCUTREE_ONLINE] = {
> +		.startup = rcutree_online_cpu,
> +		.teardown = rcutree_offline_cpu,
> +	},
>  	[CPUHP_NOTIFY_ONLINE] = {
>  		.startup = notify_online,
>  		.teardown = NULL,
> @@ -813,6 +821,10 @@ static struct cpuhp_step cpuhp_ap_states
>  		.startup = NULL,
>  		.teardown = notify_dying,
>  	},
> +	[CPUHP_AP_RCUTREE_DYING] = {
> +		.startup = NULL,
> +		.teardown = rcutree_dying_cpu,
> +	},
>  	[CPUHP_AP_SCHED_NOHZ_DYING] = {
>  		.startup = NULL,
>  		.teardown = nohz_balance_exit_idle,
> Index: linux-2.6/kernel/rcutree.c
> ===================================================================
> --- linux-2.6.orig/kernel/rcutree.c
> +++ linux-2.6/kernel/rcutree.c
> @@ -2787,67 +2787,59 @@ rcu_init_percpu_data(int cpu, struct rcu
>  	mutex_unlock(&rsp->onoff_mutex);
>  }
> 
> -static void __cpuinit rcu_prepare_cpu(int cpu)
> +int __cpuinit rcutree_prepare_cpu(unsigned int cpu)
>  {
>  	struct rcu_state *rsp;
> 
>  	for_each_rcu_flavor(rsp)
>  		rcu_init_percpu_data(cpu, rsp,
>  				     strcmp(rsp->name, "rcu_preempt") == 0);
> +	rcu_prepare_kthreads(cpu);
> +	return 0;
>  }
> 
> -/*
> - * Handle CPU online/offline notification events.
> - */
> -static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
> -				    unsigned long action, void *hcpu)
> +int __cpuinit rcutree_dead_cpu(unsigned int cpu)
>  {
> -	long cpu = (long)hcpu;
> -	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
> -	struct rcu_node *rnp = rdp->mynode;
>  	struct rcu_state *rsp;
> -	int ret = NOTIFY_OK;
> 
> -	trace_rcu_utilization("Start CPU hotplug");
> -	switch (action) {
> -	case CPU_UP_PREPARE:
> -	case CPU_UP_PREPARE_FROZEN:
> -		rcu_prepare_cpu(cpu);
> -		rcu_prepare_kthreads(cpu);
> -		break;
> -	case CPU_ONLINE:
> -	case CPU_DOWN_FAILED:
> -		rcu_boost_kthread_setaffinity(rnp, -1);
> -		break;
> -	case CPU_DOWN_PREPARE:
> -		if (nocb_cpu_expendable(cpu))
> -			rcu_boost_kthread_setaffinity(rnp, cpu);
> -		else
> -			ret = NOTIFY_BAD;
> -		break;
> -	case CPU_DYING:
> -	case CPU_DYING_FROZEN:
> -		/*
> -		 * The whole machine is "stopped" except this CPU, so we can
> -		 * touch any data without introducing corruption. We send the
> -		 * dying CPU's callbacks to an arbitrarily chosen online CPU.
> -		 */
> -		for_each_rcu_flavor(rsp)
> -			rcu_cleanup_dying_cpu(rsp);
> -		rcu_cleanup_after_idle(cpu);
> -		break;
> -	case CPU_DEAD:
> -	case CPU_DEAD_FROZEN:
> -	case CPU_UP_CANCELED:
> -	case CPU_UP_CANCELED_FROZEN:
> -		for_each_rcu_flavor(rsp)
> -			rcu_cleanup_dead_cpu(cpu, rsp);
> -		break;
> -	default:
> -		break;
> -	}
> -	trace_rcu_utilization("End CPU hotplug");
> -	return ret;
> +	for_each_rcu_flavor(rsp)
> +		rcu_cleanup_dead_cpu(cpu, rsp);
> +	return 0;
> +}
> +
> +static void __cpuinit rcutree_affinity_setting(unsigned int cpu, int outgoing)
> +{
> +	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
> +
> +	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
> +}
> +
> +int __cpuinit rcutree_online_cpu(unsigned int cpu)
> +{
> +	rcutree_affinity_setting(cpu, -1);
> +	return 0;
> +}
> +
> +int __cpuinit rcutree_offline_cpu(unsigned int cpu)
> +{
> +	if (!nocb_cpu_expendable(cpu))
> +		return -EINVAL;
> +	rcutree_affinity_setting(cpu, cpu);
> +	return 0;
> +}
> +
> +int __cpuinit rcutree_dying_cpu(unsigned int cpu)
> +{
> +	struct rcu_state *rsp;
> +	/*
> +	 * The whole machine is "stopped" except this CPU, so we can
> +	 * touch any data without introducing corruption. We send the
> +	 * dying CPU's callbacks to an arbitrarily chosen online CPU.
> +	 */
> +	for_each_rcu_flavor(rsp)
> +		rcu_cleanup_dying_cpu(rsp);
> +	rcu_cleanup_after_idle(cpu);
> +	return 0;
>  }

And rcu_dying_cpu() above, along with both definitions of
rcu_cleanup_dying_cpu().

>  /*
> @@ -3071,9 +3063,8 @@ void __init rcu_init(void)
>  	 * this is called early in boot, before either interrupts
>  	 * or the scheduler are operational.
>  	 */
> -	cpu_notifier(rcu_cpu_notify, 0);
>  	for_each_online_cpu(cpu)
> -		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
> +		rcutree_prepare_cpu(cpu);
>  	check_cpu_stall_init();
>  }
> 
> 
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread

* Re: [patch 32/40] rcu: Convert rcutree to hotplug state machine
  2013-02-12  0:01   ` Paul E. McKenney
@ 2013-02-12 15:50     ` Paul E. McKenney
  0 siblings, 0 replies; 67+ messages in thread
From: Paul E. McKenney @ 2013-02-12 15:50 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner,
	Richard Weinberger, Magnus Damm

On Mon, Feb 11, 2013 at 04:01:01PM -0800, Paul E. McKenney wrote:
> On Thu, Jan 31, 2013 at 12:11:38PM -0000, Thomas Gleixner wrote:
> > Do we really need so many states here ?
> 
> Well, all that RCU does for CPU_DYING is to do tracing, which could be
> ditched.  Required changes called out inline below.
> 
> All that the CPU_ONLINE and CPU_DOWN_PREPARE notifiers do is set
> up affinity for the RCU-boost kthreads.  These are unfortunately not
> per-CPU kthreads, but perhaps something similar could be set up.  This is
> strictly a performance optimization, so the CPU_ONLINE notifier could
> be replaced by having the kthread check which of its CPUs was online.
> Unfortunately, the same is not true of CPU_DOWN_PREPARE because if the
> kthread was too slow about it, the scheduler would get annoyed about a
> kthread being runnable only on offlined CPUs.
> 
> It is not clear that this is worthwhile.  Thoughts on other ways to
> get this done?

Actually, would there be a problem with having a tag on these tasks
so that the scheduler avoids the splat?  The idea would be to have the
scheduler break affinity if there are no longer any online CPUs in
the task's set, but just not splat.  The task could then periodically
check to see if it is running on the wrong CPU, and if there is at
least one online CPU in its set, re-affinity itself.

Seems pretty simple, so I must be missing something.  ;-)

							Thanx, Paul

> > Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> > ---
> >  include/linux/cpuhotplug.h |   18 ++++++++
> >  kernel/cpu.c               |   12 +++++
> >  kernel/rcutree.c           |   95 ++++++++++++++++++++-------------------------
> >  3 files changed, 73 insertions(+), 52 deletions(-)
> > 
> > Index: linux-2.6/include/linux/cpuhotplug.h
> > ===================================================================
> > --- linux-2.6.orig/include/linux/cpuhotplug.h
> > +++ linux-2.6/include/linux/cpuhotplug.h
> > @@ -12,6 +12,7 @@ enum cpuhp_states {
> >  	CPUHP_PERF_PREPARE,
> >  	CPUHP_SCHED_MIGRATE_PREP,
> >  	CPUHP_WORKQUEUE_PREP,
> > +	CPUHP_RCUTREE_PREPARE,
> >  	CPUHP_NOTIFY_PREPARE,
> >  	CPUHP_NOTIFY_DEAD,
> >  	CPUHP_CPUFREQ_DEAD,
> > @@ -27,6 +28,7 @@ enum cpuhp_states {
> >  	CPUHP_AP_ARM64_TIMER_STARTING,
> >  	CPUHP_AP_KVM_STARTING,
> >  	CPUHP_AP_NOTIFY_DYING,
> > +	CPUHP_AP_RCUTREE_DYING,
> 
> Drop this.
> 
> >  	CPUHP_AP_X86_TBOOT_DYING,
> >  	CPUHP_AP_S390_VTIME_DYING,
> >  	CPUHP_AP_SCHED_NOHZ_DYING,
> > @@ -39,6 +41,7 @@ enum cpuhp_states {
> >  	CPUHP_SCHED_MIGRATE_ONLINE,
> >  	CPUHP_WORKQUEUE_ONLINE,
> >  	CPUHP_CPUFREQ_ONLINE,
> > +	CPUHP_RCUTREE_ONLINE,
> >  	CPUHP_NOTIFY_ONLINE,
> >  	CPUHP_NOTIFY_DOWN_PREPARE,
> >  	CPUHP_PERF_X86_UNCORE_ONLINE,
> > @@ -147,4 +150,19 @@ int workqueue_prepare_cpu(unsigned int c
> >  int workqueue_online_cpu(unsigned int cpu);
> >  int workqueue_offline_cpu(unsigned int cpu);
> > 
> > +/* RCUtree hotplug events */
> > +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
> > +int rcutree_prepare_cpu(unsigned int cpu);
> > +int rcutree_online_cpu(unsigned int cpu);
> > +int rcutree_offline_cpu(unsigned int cpu);
> > +int rcutree_dead_cpu(unsigned int cpu);
> > +int rcutree_dying_cpu(unsigned int cpu);
> 
> And this...
> 
> > +#else
> > +#define rcutree_prepare_cpu	NULL
> > +#define rcutree_online_cpu	NULL
> > +#define rcutree_offline_cpu	NULL
> > +#define rcutree_dead_cpu	NULL
> > +#define rcutree_dying_cpu	NULL
> 
> And of course this.
> 
> > +#endif
> > +
> >  #endif
> > Index: linux-2.6/kernel/cpu.c
> > ===================================================================
> > --- linux-2.6.orig/kernel/cpu.c
> > +++ linux-2.6/kernel/cpu.c
> > @@ -755,6 +755,10 @@ static struct cpuhp_step cpuhp_bp_states
> >  		.startup = workqueue_prepare_cpu,
> >  		.teardown = NULL,
> >  	},
> > +	[CPUHP_RCUTREE_PREPARE] = {
> > +		.startup = rcutree_prepare_cpu,
> > +		.teardown = rcutree_dead_cpu,
> > +	},
> >  	[CPUHP_NOTIFY_PREPARE] = {
> >  		.startup = notify_prepare,
> >  		.teardown = NULL,
> > @@ -787,6 +791,10 @@ static struct cpuhp_step cpuhp_bp_states
> >  		.startup = workqueue_online_cpu,
> >  		.teardown = workqueue_offline_cpu,
> >  	},
> > +	[CPUHP_RCUTREE_ONLINE] = {
> > +		.startup = rcutree_online_cpu,
> > +		.teardown = rcutree_offline_cpu,
> > +	},
> >  	[CPUHP_NOTIFY_ONLINE] = {
> >  		.startup = notify_online,
> >  		.teardown = NULL,
> > @@ -813,6 +821,10 @@ static struct cpuhp_step cpuhp_ap_states
> >  		.startup = NULL,
> >  		.teardown = notify_dying,
> >  	},
> > +	[CPUHP_AP_RCUTREE_DYING] = {
> > +		.startup = NULL,
> > +		.teardown = rcutree_dying_cpu,
> > +	},
> >  	[CPUHP_AP_SCHED_NOHZ_DYING] = {
> >  		.startup = NULL,
> >  		.teardown = nohz_balance_exit_idle,
> > Index: linux-2.6/kernel/rcutree.c
> > ===================================================================
> > --- linux-2.6.orig/kernel/rcutree.c
> > +++ linux-2.6/kernel/rcutree.c
> > @@ -2787,67 +2787,59 @@ rcu_init_percpu_data(int cpu, struct rcu
> >  	mutex_unlock(&rsp->onoff_mutex);
> >  }
> > 
> > -static void __cpuinit rcu_prepare_cpu(int cpu)
> > +int __cpuinit rcutree_prepare_cpu(unsigned int cpu)
> >  {
> >  	struct rcu_state *rsp;
> > 
> >  	for_each_rcu_flavor(rsp)
> >  		rcu_init_percpu_data(cpu, rsp,
> >  				     strcmp(rsp->name, "rcu_preempt") == 0);
> > +	rcu_prepare_kthreads(cpu);
> > +	return 0;
> >  }
> > 
> > -/*
> > - * Handle CPU online/offline notification events.
> > - */
> > -static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
> > -				    unsigned long action, void *hcpu)
> > +int __cpuinit rcutree_dead_cpu(unsigned int cpu)
> >  {
> > -	long cpu = (long)hcpu;
> > -	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
> > -	struct rcu_node *rnp = rdp->mynode;
> >  	struct rcu_state *rsp;
> > -	int ret = NOTIFY_OK;
> > 
> > -	trace_rcu_utilization("Start CPU hotplug");
> > -	switch (action) {
> > -	case CPU_UP_PREPARE:
> > -	case CPU_UP_PREPARE_FROZEN:
> > -		rcu_prepare_cpu(cpu);
> > -		rcu_prepare_kthreads(cpu);
> > -		break;
> > -	case CPU_ONLINE:
> > -	case CPU_DOWN_FAILED:
> > -		rcu_boost_kthread_setaffinity(rnp, -1);
> > -		break;
> > -	case CPU_DOWN_PREPARE:
> > -		if (nocb_cpu_expendable(cpu))
> > -			rcu_boost_kthread_setaffinity(rnp, cpu);
> > -		else
> > -			ret = NOTIFY_BAD;
> > -		break;
> > -	case CPU_DYING:
> > -	case CPU_DYING_FROZEN:
> > -		/*
> > -		 * The whole machine is "stopped" except this CPU, so we can
> > -		 * touch any data without introducing corruption. We send the
> > -		 * dying CPU's callbacks to an arbitrarily chosen online CPU.
> > -		 */
> > -		for_each_rcu_flavor(rsp)
> > -			rcu_cleanup_dying_cpu(rsp);
> > -		rcu_cleanup_after_idle(cpu);
> > -		break;
> > -	case CPU_DEAD:
> > -	case CPU_DEAD_FROZEN:
> > -	case CPU_UP_CANCELED:
> > -	case CPU_UP_CANCELED_FROZEN:
> > -		for_each_rcu_flavor(rsp)
> > -			rcu_cleanup_dead_cpu(cpu, rsp);
> > -		break;
> > -	default:
> > -		break;
> > -	}
> > -	trace_rcu_utilization("End CPU hotplug");
> > -	return ret;
> > +	for_each_rcu_flavor(rsp)
> > +		rcu_cleanup_dead_cpu(cpu, rsp);
> > +	return 0;
> > +}
> > +
> > +static void __cpuinit rcutree_affinity_setting(unsigned int cpu, int outgoing)
> > +{
> > +	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
> > +
> > +	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
> > +}
> > +
> > +int __cpuinit rcutree_online_cpu(unsigned int cpu)
> > +{
> > +	rcutree_affinity_setting(cpu, -1);
> > +	return 0;
> > +}
> > +
> > +int __cpuinit rcutree_offline_cpu(unsigned int cpu)
> > +{
> > +	if (!nocb_cpu_expendable(cpu))
> > +		return -EINVAL;
> > +	rcutree_affinity_setting(cpu, cpu);
> > +	return 0;
> > +}
> > +
> > +int __cpuinit rcutree_dying_cpu(unsigned int cpu)
> > +{
> > +	struct rcu_state *rsp;
> > +	/*
> > +	 * The whole machine is "stopped" except this CPU, so we can
> > +	 * touch any data without introducing corruption. We send the
> > +	 * dying CPU's callbacks to an arbitrarily chosen online CPU.
> > +	 */
> > +	for_each_rcu_flavor(rsp)
> > +		rcu_cleanup_dying_cpu(rsp);
> > +	rcu_cleanup_after_idle(cpu);
> > +	return 0;
> >  }
> 
> And rcu_dying_cpu() above, along with both definitions of
> rcu_cleanup_dying_cpu().
> 
> >  /*
> > @@ -3071,9 +3063,8 @@ void __init rcu_init(void)
> >  	 * this is called early in boot, before either interrupts
> >  	 * or the scheduler are operational.
> >  	 */
> > -	cpu_notifier(rcu_cpu_notify, 0);
> >  	for_each_online_cpu(cpu)
> > -		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
> > +		rcutree_prepare_cpu(cpu);
> >  	check_cpu_stall_init();
> >  }
> > 
> > 
> > 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
> 


^ permalink raw reply	[flat|nested] 67+ messages in thread
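
Paul's "periodically re-affine" idea from the message above could look roughly like the kernel-style sketch below. The scheduler-side tag that suppresses the splat is the missing piece and is not shown; the cpumask helpers used here are existing kernel APIs:

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Sketch only: "wanted" is the CPU set this kthread prefers. */
static int boost_kthread_fn(void *arg)
{
	const struct cpumask *wanted = arg;

	while (!kthread_should_stop()) {
		/*
		 * If the scheduler had to break our affinity because all
		 * preferred CPUs went offline, move back as soon as one
		 * of them comes online again.
		 */
		if (cpumask_intersects(wanted, cpu_online_mask) &&
		    !cpumask_test_cpu(raw_smp_processor_id(), wanted))
			set_cpus_allowed_ptr(current, wanted);

		/* ... do the actual boosting work ... */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}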

* [tip:smp/hotplug] smpboot: Allow selfparking per cpu threads
  2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
  2013-02-09  0:29   ` Paul E. McKenney
@ 2013-02-14 17:46   ` tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: tip-bot for Thomas Gleixner @ 2013-02-14 17:46 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, rusty, pjt, peterz, srivatsa.bhat, rw,
	paulmck, arjan, tglx, magnus.damm

Commit-ID:  7d7e499f7333f68b7e7f67d14b9c1480913b4afb
Gitweb:     http://git.kernel.org/tip/7d7e499f7333f68b7e7f67d14b9c1480913b4afb
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Thu, 31 Jan 2013 12:11:12 +0000
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 14 Feb 2013 15:29:37 +0100

smpboot: Allow selfparking per cpu threads

The stop machine threads are still killed when a cpu goes offline. The
reason is that the thread is used to bring the cpu down, so it can't
be parked along with the other per cpu threads.

Allow a per cpu thread to be excluded from automatic parking, so it
can park itself once it's done.

Add a create callback function as well.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.553993267@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/smpboot.h | 5 +++++
 kernel/smpboot.c        | 5 +++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index e0106d8..c65dee0 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -14,6 +14,8 @@ struct smpboot_thread_data;
  * @thread_should_run:	Check whether the thread should run or not. Called with
  *			preemption disabled.
  * @thread_fn:		The associated thread function
+ * @create:		Optional setup function, called when the thread gets
+ *			created (Not called from the thread context)
  * @setup:		Optional setup function, called when the thread gets
  *			operational the first time
  * @cleanup:		Optional cleanup function, called when the thread
@@ -22,6 +24,7 @@ struct smpboot_thread_data;
  *			parked (cpu offline)
  * @unpark:		Optional unpark function, called when the thread is
  *			unparked (cpu online)
+ * @selfparking:	Thread is not parked by the park function.
  * @thread_comm:	The base name of the thread
  */
 struct smp_hotplug_thread {
@@ -29,10 +32,12 @@ struct smp_hotplug_thread {
 	struct list_head		list;
 	int				(*thread_should_run)(unsigned int cpu);
 	void				(*thread_fn)(unsigned int cpu);
+	void				(*create)(unsigned int cpu);
 	void				(*setup)(unsigned int cpu);
 	void				(*cleanup)(unsigned int cpu, bool online);
 	void				(*park)(unsigned int cpu);
 	void				(*unpark)(unsigned int cpu);
+	bool				selfparking;
 	const char			*thread_comm;
 };
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d6c5fc0..d4abac2 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -183,9 +183,10 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 		kfree(td);
 		return PTR_ERR(tsk);
 	}
-
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
+	if (ht->create)
+		ht->create(cpu);
 	return 0;
 }
 
@@ -225,7 +226,7 @@ static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 {
 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
-	if (tsk)
+	if (tsk && !ht->selfparking)
 		kthread_park(tsk);
 }
 

^ permalink raw reply related	[flat|nested] 67+ messages in thread

* [tip:smp/hotplug] stop_machine: Store task reference in a separate per cpu variable
  2013-01-31 12:11 ` [patch 02/40] stop_machine: Store task reference in a separate per cpu variable Thomas Gleixner
  2013-02-09  0:33   ` Paul E. McKenney
@ 2013-02-14 17:47   ` tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: tip-bot for Thomas Gleixner @ 2013-02-14 17:47 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, rusty, pjt, peterz, srivatsa.bhat, rw,
	paulmck, arjan, tglx, magnus.damm

Commit-ID:  860a0ffaa3e1a9cf0ebb5f43d6a2a2ce67463e93
Gitweb:     http://git.kernel.org/tip/860a0ffaa3e1a9cf0ebb5f43d6a2a2ce67463e93
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Thu, 31 Jan 2013 12:11:13 +0000
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 14 Feb 2013 15:29:37 +0100

stop_machine: Store task reference in a separate per cpu variable

To allow the stopper thread to be managed by the smpboot thread
infrastructure, separate out the task storage from the stopper data
structure.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.626690384@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/stop_machine.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e9..aaac68c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -37,10 +37,10 @@ struct cpu_stopper {
 	spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
-	struct task_struct	*thread;	/* stopper thread */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(struct cpu_stopper *stopper,
-				struct cpu_stop_work *work)
+static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+
 	unsigned long flags;
 
 	spin_lock_irqsave(&stopper->lock, flags);
 
 	if (stopper->enabled) {
 		list_add_tail(&work->list, &stopper->works);
-		wake_up_process(stopper->thread);
+		wake_up_process(p);
 	} else
 		cpu_stop_signal_done(work->done, false);
 
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
 	cpu_stop_init_done(&done, 1);
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+	cpu_stop_queue_work(cpu, &work);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			struct cpu_stop_work *work_buf)
 {
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+	cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
 	 */
 	preempt_disable();
 	for_each_cpu(cpu, cpumask)
-		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
-				    &per_cpu(stop_cpus_work, cpu));
+		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
 	preempt_enable();
 }
 
@@ -304,12 +305,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p;
+	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		BUG_ON(stopper->thread || stopper->enabled ||
-		       !list_empty(&stopper->works));
+		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
 		p = kthread_create_on_node(cpu_stopper_thread,
 					   stopper,
 					   cpu_to_node(cpu),
@@ -319,12 +319,12 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 		get_task_struct(p);
 		kthread_bind(p, cpu);
 		sched_set_stop_task(cpu, p);
-		stopper->thread = p;
+		per_cpu(cpu_stopper_task, cpu) = p;
 		break;
 
 	case CPU_ONLINE:
 		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(stopper->thread);
+		wake_up_process(p);
 		/* mark enabled */
 		spin_lock_irq(&stopper->lock);
 		stopper->enabled = true;
@@ -339,7 +339,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 
 		sched_set_stop_task(cpu, NULL);
 		/* kill the stopper */
-		kthread_stop(stopper->thread);
+		kthread_stop(p);
 		/* drain remaining works */
 		spin_lock_irq(&stopper->lock);
 		list_for_each_entry(work, &stopper->works, list)
@@ -347,8 +347,8 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 		stopper->enabled = false;
 		spin_unlock_irq(&stopper->lock);
 		/* release the stopper */
-		put_task_struct(stopper->thread);
-		stopper->thread = NULL;
+		put_task_struct(p);
+		per_cpu(cpu_stopper_task, cpu) = NULL;
 		break;
 	}
 #endif

^ permalink raw reply related	[flat|nested] 67+ messages in thread
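
The point of pulling the task pointer out of struct cpu_stopper becomes visible in the next patch: the smpboot infrastructure manages the thread through a bare per-cpu task_struct pointer that the client hands over via the .store member. A sketch with a hypothetical "foo" client (cpu_stopper is the real one here):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, foo_task);

static int foo_should_run(unsigned int cpu)
{
	return 0;	/* nothing to do in this sketch */
}

static void foo_thread_fn(unsigned int cpu)
{
}

static struct smp_hotplug_thread foo_threads = {
	.store			= &foo_task,
	.thread_should_run	= foo_should_run,
	.thread_fn		= foo_thread_fn,
	.thread_comm		= "foo/%u",
};

static int __init foo_init(void)
{
	return smpboot_register_percpu_thread(&foo_threads);
}
early_initcall(foo_init);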

* [tip:smp/hotplug] stop_machine: Use smpboot threads
  2013-01-31 12:11 ` [patch 03/40] stop_machine: Use smpboot threads Thomas Gleixner
  2013-02-09  0:39   ` Paul E. McKenney
@ 2013-02-14 17:49   ` tip-bot for Thomas Gleixner
  1 sibling, 0 replies; 67+ messages in thread
From: tip-bot for Thomas Gleixner @ 2013-02-14 17:49 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, rusty, pjt, peterz, srivatsa.bhat, rw,
	paulmck, arjan, tglx, magnus.damm

Commit-ID:  14e568e78f6f80ca1e27256641ddf524c7dbdc51
Gitweb:     http://git.kernel.org/tip/14e568e78f6f80ca1e27256641ddf524c7dbdc51
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Thu, 31 Jan 2013 12:11:14 +0000
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 14 Feb 2013 15:29:38 +0100

stop_machine: Use smpboot threads

Use the smpboot thread infrastructure. Mark the stopper thread
selfparking and park it after it has finished the take_cpu_down()
work.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.686315164@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c          |   2 +
 kernel/stop_machine.c | 136 +++++++++++++++++++-------------------------------
 2 files changed, 52 insertions(+), 86 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3046a50..c91e30d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -254,6 +254,8 @@ static int __ref take_cpu_down(void *_param)
 		return err;
 
 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Park the stopper thread */
+	kthread_park(current);
 	return 0;
 }
 
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index aaac68c..95d178c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -18,7 +18,7 @@
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
+#include <linux/smpboot.h>
 #include <linux/atomic.h>
 
 /*
@@ -245,20 +245,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	unsigned long flags;
+	int run;
+
+	spin_lock_irqsave(&stopper->lock, flags);
+	run = !list_empty(&stopper->works);
+	spin_unlock_irqrestore(&stopper->lock, flags);
+	return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
 {
-	struct cpu_stopper *stopper = data;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
 	int ret;
 
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
-
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		return 0;
-	}
-
 	work = NULL;
 	spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
@@ -274,8 +279,6 @@ repeat:
 		struct cpu_stop_done *done = work->done;
 		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-		__set_current_state(TASK_RUNNING);
-
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
@@ -291,87 +294,55 @@ repeat:
 					  ksym_buf), arg);
 
 		cpu_stop_signal_done(done, true);
-	} else
-		schedule();
-
-	goto repeat;
+		goto repeat;
+	}
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-					   unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+	struct cpu_stop_work *work;
+	unsigned long flags;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
-		p = kthread_create_on_node(cpu_stopper_thread,
-					   stopper,
-					   cpu_to_node(cpu),
-					   "migration/%d", cpu);
-		if (IS_ERR(p))
-			return notifier_from_errno(PTR_ERR(p));
-		get_task_struct(p);
-		kthread_bind(p, cpu);
-		sched_set_stop_task(cpu, p);
-		per_cpu(cpu_stopper_task, cpu) = p;
-		break;
-
-	case CPU_ONLINE:
-		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(p);
-		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
-		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-	{
-		struct cpu_stop_work *work;
-
-		sched_set_stop_task(cpu, NULL);
-		/* kill the stopper */
-		kthread_stop(p);
-		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
-		list_for_each_entry(work, &stopper->works, list)
-			cpu_stop_signal_done(work->done, false);
-		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
-		/* release the stopper */
-		put_task_struct(p);
-		per_cpu(cpu_stopper_task, cpu) = NULL;
-		break;
-	}
-#endif
-	}
+	/* drain remaining works */
+	spin_lock_irqsave(&stopper->lock, flags);
+	list_for_each_entry(work, &stopper->works, list)
+		cpu_stop_signal_done(work->done, false);
+	stopper->enabled = false;
+	spin_unlock_irqrestore(&stopper->lock, flags);
+}
 
-	return NOTIFY_OK;
+static void cpu_stop_unpark(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+	spin_lock_irq(&stopper->lock);
+	stopper->enabled = true;
+	spin_unlock_irq(&stopper->lock);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers.  It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-	.notifier_call	= cpu_stop_cpu_callback,
-	.priority	= 10,
+static struct smp_hotplug_thread cpu_stop_threads = {
+	.store			= &cpu_stopper_task,
+	.thread_should_run	= cpu_stop_should_run,
+	.thread_fn		= cpu_stopper_thread,
+	.thread_comm		= "migration/%u",
+	.create			= cpu_stop_create,
+	.setup			= cpu_stop_unpark,
+	.park			= cpu_stop_park,
+	.unpark			= cpu_stop_unpark,
+	.selfparking		= true,
 };
 
 static int __init cpu_stop_init(void)
 {
-	void *bcpu = (void *)(long)smp_processor_id();
 	unsigned int cpu;
-	int err;
 
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
-	/* start one for the boot cpu */
-	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-				    bcpu);
-	BUG_ON(err != NOTIFY_OK);
-	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-	register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
 	stop_machine_initialized = true;
-
 	return 0;
 }
 early_initcall(cpu_stop_init);

^ permalink raw reply related	[flat|nested] 67+ messages in thread
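
What makes the conversion above work is the loop that kernel/smpboot.c runs on the client's behalf; this is why cpu_stopper_thread() could drop its own set_current_state()/schedule()/kthread_should_stop() handling. The following is simplified from smpboot_thread_fn(), with the setup/cleanup status bookkeeping omitted:

static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			if (ht->park)
				ht->park(td->cpu);
			kthread_parkme();
			continue;	/* re-evaluate after unpark */
		}

		if (!ht->thread_should_run(td->cpu)) {
			schedule();	/* idle until woken */
		} else {
			__set_current_state(TASK_RUNNING);
			ht->thread_fn(td->cpu);
		}
	}
}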

* Re: [patch 04/40] cpu: Restructure FROZEN state handling
  2013-01-31 12:11 ` [patch 04/40] cpu: Restructure FROZEN state handling Thomas Gleixner
  2013-02-09  0:52   ` Paul E. McKenney
@ 2014-10-09 16:53   ` Borislav Petkov
  1 sibling, 0 replies; 67+ messages in thread
From: Borislav Petkov @ 2014-10-09 16:53 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner, Magnus Damm,
	Jörg Rödel

On Thu, Jan 31, 2013 at 12:11:15PM -0000, Thomas Gleixner wrote:
> There are only a few callbacks which really care about FROZEN
> vs. !FROZEN. No need to have extra states for this. 
> 
> Publish the frozen state in an extra variable which is updated under
> the hotplug lock and let the interested users deal with it w/o
> imposing extra state checks on everyone.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>  kernel/cpu.c |   66 ++++++++++++++++++++++++-----------------------------------
>  1 file changed, 27 insertions(+), 39 deletions(-)

So, I'm looking through this and trying to apply the patches on top of
the current kernel. Here's this one quilt-refreshed on top of 3.17.

---

>From linux-kernel-owner@vger.kernel.org Thu Jan 31 13:13:23 2013
Message-Id: <20130131120741.751984627@linutronix.de>
User-Agent: quilt/0.48-1
Date:	Thu, 31 Jan 2013 12:11:15 -0000
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>, Peter Zijlstra <peterz@infradead.org>,
 Rusty Russell <rusty@rustcorp.com.au>, Paul McKenney
 <paulmck@linux.vnet.ibm.com>, "Srivatsa S. Bhat"
 <srivatsa.bhat@linux.vnet.ibm.com>, Arjan van de Veen <arjan@infradead.org>,
 Paul Turner <pjt@google.com>, Richard Weinberger <rw@linutronix.de>, Magnus
 Damm <magnus.damm@gmail.com>
Subject: [patch 04/40] cpu: Restructure FROZEN state handling
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain; charset=utf-8
Status: RO

There are only a few callbacks which really care about FROZEN
vs. !FROZEN. No need to have extra states for this. 

Publish the frozen state in an extra variable which is updated under
the hotplug lock and let the interested users deal with it w/o
imposing extra state checks on everyone.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c |   66 ++++++++++++++++++++++++-----------------------------------
 1 file changed, 27 insertions(+), 39 deletions(-)

Index: linux/kernel/cpu.c
===================================================================
--- linux.orig/kernel/cpu.c	2014-10-09 18:40:30.991799290 +0200
+++ linux/kernel/cpu.c	2014-10-09 18:42:46.823798395 +0200
@@ -27,6 +27,7 @@
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
+static bool cpuhp_tasks_frozen;
 
 /*
  * The following two APIs (cpu_maps_update_begin/done) must be used when
@@ -194,27 +195,30 @@ int __ref __register_cpu_notifier(struct
 	return raw_notifier_chain_register(&cpu_chain, nb);
 }
 
-static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
 			int *nr_calls)
 {
+	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	void *hcpu = (void *)(long)cpu;
+
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
 					nr_calls);
 
 	return notifier_to_errno(ret);
 }
 
-static int cpu_notify(unsigned long val, void *v)
+static int cpu_notify(unsigned long val, unsigned int cpu)
 {
-	return __cpu_notify(val, v, -1, NULL);
+	return __cpu_notify(val, cpu, -1, NULL);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void cpu_notify_nofail(unsigned long val, void *v)
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
 {
-	BUG_ON(cpu_notify(val, v));
+	BUG_ON(cpu_notify(val, cpu));
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
@@ -298,23 +302,17 @@ static inline void check_for_tasks(int d
 	read_unlock_irq(&tasklist_lock);
 }
 
-struct take_cpu_down_param {
-	unsigned long mod;
-	void *hcpu;
-};
-
 /* Take this CPU down. */
 static int __ref take_cpu_down(void *_param)
 {
-	struct take_cpu_down_param *param = _param;
-	int err;
+	int err, cpu = smp_processor_id();
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
 		return err;
 
-	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	cpu_notify(CPU_DYING, cpu);
 	/* Park the stopper thread */
 	kthread_park(current);
 	return 0;
@@ -324,12 +322,6 @@ static int __ref take_cpu_down(void *_pa
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
-	struct take_cpu_down_param tcd_param = {
-		.mod = mod,
-		.hcpu = hcpu,
-	};
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -339,10 +331,12 @@ static int __ref _cpu_down(unsigned int
 
 	cpu_hotplug_begin();
 
-	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
 	if (err) {
 		nr_calls--;
-		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
 		pr_warn("%s: attempt to take down CPU %u failed\n",
 			__func__, cpu);
 		goto out_release;
@@ -369,11 +363,11 @@ static int __ref _cpu_down(unsigned int
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
 
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = __stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		smpboot_unpark_threads(cpu);
-		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -392,14 +386,14 @@ static int __ref _cpu_down(unsigned int
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
+	cpu_notify_nofail(CPU_DEAD, cpu);
 
 	check_for_tasks(cpu);
 
 out_release:
 	cpu_hotplug_done();
 	if (!err)
-		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+		cpu_notify_nofail(CPU_POST_DEAD, cpu);
 	return err;
 }
 
@@ -426,10 +420,8 @@ EXPORT_SYMBOL(cpu_down);
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
-	int ret, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct task_struct *idle;
+	int ret, nr_calls = 0;
 
 	cpu_hotplug_begin();
 
@@ -448,7 +440,9 @@ static int _cpu_up(unsigned int cpu, int
 	if (ret)
 		goto out;
 
-	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
 		pr_warn("%s: attempt to bring up CPU %u failed\n",
@@ -466,11 +460,11 @@ static int _cpu_up(unsigned int cpu, int
 	smpboot_unpark_threads(cpu);
 
 	/* Now call notifier in preparation. */
-	cpu_notify(CPU_ONLINE | mod, hcpu);
+	cpu_notify(CPU_ONLINE, cpu);
 
 out_notify:
 	if (ret != 0)
-		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
 out:
 	cpu_hotplug_done();
 
@@ -657,13 +651,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
  */
 void notify_cpu_starting(unsigned int cpu)
 {
-	unsigned long val = CPU_STARTING;
-
-#ifdef CONFIG_PM_SLEEP_SMP
-	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
-		val = CPU_STARTING_FROZEN;
-#endif /* CONFIG_PM_SLEEP_SMP */
-	cpu_notify(val, (void *)(long)cpu);
+	cpu_notify(CPU_STARTING, cpu);
 }
 
 #endif /* CONFIG_SMP */

-- 
Regards/Gruss,
    Boris.

Sent from a fat crate under my desk. Formatting is fine.
--

^ permalink raw reply	[flat|nested] 67+ messages in thread
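
The effect of the restructuring on a client callback, shown side by side; "foo" and foo_bringup() are hypothetical stand-ins, and cpuhp_tasks_frozen is the variable the patch introduces (static to kernel/cpu.c at this point, so out-of-file users would need an accessor):

static void foo_bringup(unsigned int cpu, bool frozen);

/* Before: frozen-ness is encoded in the action value. */
static int foo_notifier(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		foo_bringup(cpu, action & CPU_TASKS_FROZEN);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

/* After: one action; frozen-ness is a separate bool updated under the
 * hotplug lock, so most callbacks can simply ignore it. */
static int foo_online(unsigned int cpu)
{
	foo_bringup(cpu, cpuhp_tasks_frozen);
	return 0;
}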

* Re: [patch 05/40] cpu: Restructure cpu_down code
  2013-01-31 12:11 ` [patch 05/40] cpu: Restructure cpu_down code Thomas Gleixner
  2013-02-09  0:49   ` Paul E. McKenney
@ 2014-10-09 17:05   ` Borislav Petkov
  1 sibling, 0 replies; 67+ messages in thread
From: Borislav Petkov @ 2014-10-09 17:05 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Rusty Russell, Paul McKenney,
	Srivatsa S. Bhat, Arjan van de Veen, Paul Turner, Magnus Damm,
	Richard Weinberger, Jörg Rödel

On Thu, Jan 31, 2013 at 12:11:15PM -0000, Thomas Gleixner wrote:
> Split out into separate functions, so we can convert it to a state machine.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Refreshed:

>From linux-kernel-owner@vger.kernel.org Thu Jan 31 13:11:32 2013
Message-Id: <20130131120741.823640323@linutronix.de>
User-Agent: quilt/0.48-1
Date:	Thu, 31 Jan 2013 12:11:15 -0000
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>, Peter Zijlstra <peterz@infradead.org>,
 Rusty Russell <rusty@rustcorp.com.au>, Paul McKenney
 <paulmck@linux.vnet.ibm.com>, "Srivatsa S. Bhat"
 <srivatsa.bhat@linux.vnet.ibm.com>, Arjan van de Veen <arjan@infradead.org>,
 Paul Turner <pjt@google.com>, Richard Weinberger <rw@linutronix.de>, Magnus
 Damm <magnus.damm@gmail.com>
Subject: [patch 05/40] cpu: Restructure cpu_down code
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain; charset=utf-8
Status: RO

Split out into separate functions, so we can convert it to a state machine.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/cpu.c |   69 ++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 22 deletions(-)

Index: linux/kernel/cpu.c
===================================================================
--- linux.orig/kernel/cpu.c	2014-10-09 18:54:47.251793646 +0200
+++ linux/kernel/cpu.c	2014-10-09 19:00:46.743791277 +0200
@@ -214,6 +214,43 @@ static int cpu_notify(unsigned long val,
 	return __cpu_notify(val, cpu, -1, NULL);
 }
 
+/* Notifier wrappers for transitioning to state machine */
+static int notify_prepare(unsigned int cpu)
+{
+	int nr_calls = 0;
+	int ret;
+
+	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
+	if (ret) {
+		nr_calls--;
+		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
+				__func__, cpu);
+		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
+	}
+	return ret;
+}
+
+static int notify_online(unsigned int cpu)
+{
+	cpu_notify(CPU_ONLINE, cpu);
+	return 0;
+}
+
+static int bringup_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	int ret;
+
+	/* Arch-specific enabling code. */
+	ret = __cpu_up(cpu, idle);
+	if (ret) {
+		cpu_notify(CPU_UP_CANCELED, cpu);
+		return ret;
+	}
+	BUG_ON(!cpu_online(cpu));
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
@@ -421,7 +458,7 @@ EXPORT_SYMBOL(cpu_down);
 static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	struct task_struct *idle;
-	int ret, nr_calls = 0;
+	int ret;
 
 	cpu_hotplug_begin();
 
@@ -436,35 +473,24 @@ static int _cpu_up(unsigned int cpu, int
 		goto out;
 	}
 
+	cpuhp_tasks_frozen = tasks_frozen;
+
 	ret = smpboot_create_threads(cpu);
 	if (ret)
 		goto out;
 
-	cpuhp_tasks_frozen = tasks_frozen;
-
-	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
-	if (ret) {
-		nr_calls--;
-		pr_warn("%s: attempt to bring up CPU %u failed\n",
-			__func__, cpu);
-		goto out_notify;
-	}
+	ret = notify_prepare(cpu);
+	if (ret)
+		goto out;
 
-	/* Arch-specific enabling code. */
-	ret = __cpu_up(cpu, idle);
-	if (ret != 0)
-		goto out_notify;
-	BUG_ON(!cpu_online(cpu));
+	ret = bringup_cpu(cpu);
+	if (ret)
+		goto out;
 
 	/* Wake the per cpu threads */
 	smpboot_unpark_threads(cpu);
 
-	/* Now call notifier in preparation. */
-	cpu_notify(CPU_ONLINE, cpu);
-
-out_notify:
-	if (ret != 0)
-		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
+	notify_online(cpu);
 out:
 	cpu_hotplug_done();
 

-- 
Regards/Gruss,
    Boris.

Sent from a fat crate under my desk. Formatting is fine.
--

^ permalink raw reply	[flat|nested] 67+ messages in thread

end of thread, other threads:[~2014-10-09 17:05 UTC | newest]

Thread overview: 67+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-01-31 15:44 [patch 00/40] CPU hotplug rework - episode I Thomas Gleixner
2013-01-31 12:11 ` [patch 01/40] smpboot: Allow selfparking per cpu threads Thomas Gleixner
2013-02-09  0:29   ` Paul E. McKenney
2013-02-14 17:46   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2013-01-31 12:11 ` [patch 02/40] stop_machine: Store task reference in a separate per cpu variable Thomas Gleixner
2013-02-09  0:33   ` Paul E. McKenney
2013-02-14 17:47   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2013-01-31 12:11 ` [patch 03/40] stop_machine: Use smpboot threads Thomas Gleixner
2013-02-09  0:39   ` Paul E. McKenney
2013-02-14 17:49   ` [tip:smp/hotplug] " tip-bot for Thomas Gleixner
2013-01-31 12:11 ` [patch 04/40] cpu: Restructure FROZEN state handling Thomas Gleixner
2013-02-09  0:52   ` Paul E. McKenney
2014-10-09 16:53   ` Borislav Petkov
2013-01-31 12:11 ` [patch 05/40] cpu: Restructure cpu_up code Thomas Gleixner
2013-02-09  0:49   ` Paul E. McKenney
2014-10-09 17:05   ` Borislav Petkov
2013-01-31 12:11 ` [patch 06/40] cpu: hotplug: Split out cpu down functions Thomas Gleixner
2013-02-09  0:54   ` Paul E. McKenney
2013-01-31 12:11 ` [patch 07/40] cpu: hotplug: Convert to a state machine for the control processor Thomas Gleixner
2013-02-11 20:09   ` Paul E. McKenney
2013-01-31 12:11 ` [patch 08/40] cpu: hotplug: Convert the hotplugged processor work to a state machine Thomas Gleixner
2013-02-11 20:17   ` Paul E. McKenney
2013-01-31 12:11 ` [patch 10/40] sched: Convert to state machine callbacks Thomas Gleixner
2013-02-11 23:46   ` Paul E. McKenney
2013-01-31 12:11 ` [patch 09/40] cpu: hotplug: Implement setup/removal interface Thomas Gleixner
2013-02-01 13:44   ` Hillf Danton
2013-02-01 13:52     ` Thomas Gleixner
2013-01-31 12:11 ` [patch 11/40] x86: uncore: Move teardown callback to CPU_DEAD Thomas Gleixner
2013-01-31 12:11 ` [patch 12/40] x86: uncore: Convert to hotplug state machine Thomas Gleixner
2013-01-31 12:11 ` [patch 13/40] perf: " Thomas Gleixner
2013-01-31 12:11 ` [patch 14/40] x86: perf: Convert the core to the " Thomas Gleixner
2013-01-31 12:11 ` [patch 16/40] blackfin: perf: Convert hotplug notifier to " Thomas Gleixner
2013-01-31 12:11 ` [patch 15/40] x86: perf: Convert AMD IBS to hotplug " Thomas Gleixner
2013-01-31 12:11 ` [patch 17/40] powerpc: perf: Convert book3s notifier to state machine callbacks Thomas Gleixner
2013-01-31 12:11 ` [patch 18/40] s390: perf: Convert the hotplug " Thomas Gleixner
2013-01-31 12:11 ` [patch 19/40] sh: perf: Convert the hotplug notifiers " Thomas Gleixner
2013-01-31 12:11 ` [patch 21/40] sched: Convert the migration callback to hotplug states Thomas Gleixner
2013-01-31 12:11 ` [patch 20/40] perf: Remove perf cpu notifier code Thomas Gleixner
2013-01-31 12:11 ` [patch 22/40] workqueue: Convert to state machine callbacks Thomas Gleixner
2013-01-31 12:11 ` [patch 23/40] cpufreq: Convert to hotplug state machine Thomas Gleixner
2013-01-31 12:11 ` [patch 24/40] arm64: Convert generic timers " Thomas Gleixner
2013-01-31 12:11 ` [patch 25/40] arm: Convert VFP hotplug notifiers to " Thomas Gleixner
2013-01-31 12:11 ` [patch 26/40] arm: perf: Convert to hotplug " Thomas Gleixner
2013-01-31 12:11 ` [patch 27/40] virt: Convert kvm hotplug to " Thomas Gleixner
2013-01-31 12:11 ` [patch 28/40] cpuhotplug: Remove CPU_STARTING notifier Thomas Gleixner
2013-01-31 12:11 ` [patch 29/40] s390: Convert vtime to hotplug state machine Thomas Gleixner
2013-01-31 12:11 ` [patch 30/40] x86: tboot: Convert " Thomas Gleixner
2013-01-31 12:11 ` [patch 31/40] sched: Convert fair nohz balancer " Thomas Gleixner
2013-01-31 12:11 ` [patch 33/40] hrtimer: Convert " Thomas Gleixner
2013-01-31 12:11 ` [patch 32/40] rcu: Convert rcutree " Thomas Gleixner
2013-02-12  0:01   ` Paul E. McKenney
2013-02-12 15:50     ` Paul E. McKenney
2013-01-31 12:11 ` [patch 34/40] cpuhotplug: Remove CPU_DYING notifier Thomas Gleixner
2013-01-31 12:11 ` [patch 35/40] timers: Convert to hotplug state machine Thomas Gleixner
2013-01-31 12:11 ` [patch 36/40] profile: Convert to " Thomas Gleixner
2013-01-31 12:11 ` [patch 37/40] x86: x2apic: Convert to cpu " Thomas Gleixner
2013-01-31 12:11 ` [patch 38/40] smp: Convert core to " Thomas Gleixner
2013-01-31 12:11 ` [patch 39/40] relayfs: Convert " Thomas Gleixner
2013-01-31 12:11 ` [patch 40/40] slab: " Thomas Gleixner
2013-01-31 20:23 ` [patch 00/40] CPU hotplug rework - episode I Andrew Morton
2013-01-31 21:48   ` Thomas Gleixner
2013-01-31 21:59     ` Linus Torvalds
2013-01-31 22:44       ` Thomas Gleixner
2013-01-31 22:55         ` Linus Torvalds
2013-02-01 10:51           ` Thomas Gleixner
2013-02-07  4:01             ` Rusty Russell
2013-02-09  0:28 ` Paul E. McKenney

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.