Subject: [RFC][PATCH 4/4] use disable_cpu_hotplug() instead of lock_cpu_hotplug() where appropriate
From: Dominik Brodowski
Date: 2005-01-19 21:38 UTC
To: dhowells, linux-kernel, anton, mingo

Use {dis,en}able_cpu_hotplug() instead of {un,}lock_cpu_hotplug() in
(hopefully) obvious places that don't need serialization (or that provide
it on their own) and that don't need to be serialized against each other
(such as ppc64's rtasd and cpufreq).

Signed-off-by: Dominik Brodowski <linux@brodo.de>
---

 arch/ppc64/kernel/rtasd.c |   10 +++++-----
 drivers/cpufreq/cpufreq.c |    4 ++--
 kernel/sched.c            |   14 +++++++-------
 kernel/stop_machine.c     |    4 ++--
 net/core/flow.c           |    4 ++--
 5 files changed, 18 insertions(+), 18 deletions(-)
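
[ Background, for readers without patches 1-3 of this series at hand:
  in 2.6.11-rc1, lock_cpu_hotplug() is a plain down() on the cpucontrol
  semaphore, so any two callers serialize against each other even when
  each of them only needs the set of online CPUs to stay stable.  The
  sketch below shows the semantics this patch relies on -- a refcount
  of hotplug "disablers" -- and is purely illustrative: the real
  {dis,en}able_cpu_hotplug() implementation is introduced in the
  earlier patches of this series and may well differ.

	/* Illustrative sketch only -- not the code from patches 1-3. */
	static spinlock_t hotplug_disable_lock = SPIN_LOCK_UNLOCKED;
	static int hotplug_disable_count;

	void disable_cpu_hotplug(void)
	{
		spin_lock(&hotplug_disable_lock);
		hotplug_disable_count++;	/* disablers do not exclude each other */
		spin_unlock(&hotplug_disable_lock);
	}

	void enable_cpu_hotplug(void)
	{
		spin_lock(&hotplug_disable_lock);
		hotplug_disable_count--;
		spin_unlock(&hotplug_disable_lock);
	}

	/* cpu_up()/cpu_down() would still take the cpucontrol semaphore
	 * to serialize against each other, and would additionally wait
	 * for hotplug_disable_count to drop to zero before changing
	 * cpu_online_map.  A complete implementation also has to make
	 * disable_cpu_hotplug() wait out an in-flight hotplug operation;
	 * that detail is omitted here. */
]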

Index: 2.6.11-rc1+/arch/ppc64/kernel/rtasd.c
===================================================================
--- 2.6.11-rc1+.orig/arch/ppc64/kernel/rtasd.c	2005-01-16 23:15:25.000000000 +0100
+++ 2.6.11-rc1+/arch/ppc64/kernel/rtasd.c	2005-01-18 19:54:21.000000000 +0100
@@ -437,7 +437,7 @@
 	}
 
 	/* First pass. */
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	for_each_online_cpu(cpu) {
 		DEBUG("scheduling on %d\n", cpu);
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -447,7 +447,7 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ);
 	}
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 
 	if (surveillance_timeout != -1) {
 		DEBUG("enabling surveillance\n");
@@ -455,7 +455,7 @@
 		DEBUG("surveillance enabled\n");
 	}
 
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	cpu = first_cpu(cpu_online_map);
 	for (;;) {
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -465,10 +465,10 @@
 		/* Drop hotplug lock, and sleep for a bit (at least
 		 * one second since some machines have problems if we
 		 * call event-scan too quickly). */
-		unlock_cpu_hotplug();
+		enable_cpu_hotplug();
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
-		lock_cpu_hotplug();
+		disable_cpu_hotplug();
 
 		cpu = next_cpu(cpu, cpu_online_map);
 		if (cpu == NR_CPUS)
Index: 2.6.11-rc1+/drivers/cpufreq/cpufreq.c
===================================================================
--- 2.6.11-rc1+.orig/drivers/cpufreq/cpufreq.c	2005-01-16 23:23:11.000000000 +0100
+++ 2.6.11-rc1+/drivers/cpufreq/cpufreq.c	2005-01-18 19:53:13.000000000 +0100
@@ -1027,12 +1027,12 @@
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
Index: 2.6.11-rc1+/kernel/sched.c
===================================================================
--- 2.6.11-rc1+.orig/kernel/sched.c	2005-01-16 23:23:12.000000000 +0100
+++ 2.6.11-rc1+/kernel/sched.c	2005-01-18 19:56:59.000000000 +0100
@@ -3422,13 +3422,13 @@
 	task_t *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
+		enable_cpu_hotplug();
 		return -ESRCH;
 	}
 
@@ -3449,7 +3449,7 @@
 
 out_unlock:
 	put_task_struct(p);
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 	return retval;
 }
 
@@ -3503,7 +3503,7 @@
 	int retval;
 	task_t *p;
 
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -3516,7 +3516,7 @@
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 	if (retval)
 		return retval;
 
@@ -4309,8 +4309,8 @@
 		BUG_ON(rq->nr_running != 0);
 
 		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't do lock_cpu_hotplug().  Just wake up
-		 * the requestors. */
+		 * they didn't do lock_cpu_hotplug() or disable_cpu_hotplug().
+		 * Just wake up the requestors. */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			migration_req_t *req;
Index: 2.6.11-rc1+/kernel/stop_machine.c
===================================================================
--- 2.6.11-rc1+.orig/kernel/stop_machine.c	2005-01-16 23:15:30.000000000 +0100
+++ 2.6.11-rc1+/kernel/stop_machine.c	2005-01-18 19:55:25.000000000 +0100
@@ -195,13 +195,13 @@
 	int ret;
 
 	/* No CPUs can come up or down during this. */
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	p = __stop_machine_run(fn, data, cpu);
 	if (!IS_ERR(p))
 		ret = kthread_stop(p);
 	else
 		ret = PTR_ERR(p);
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 
 	return ret;
 }
Index: 2.6.11-rc1+/net/core/flow.c
===================================================================
--- 2.6.11-rc1+.orig/net/core/flow.c	2004-04-04 14:14:00.000000000 +0200
+++ 2.6.11-rc1+/net/core/flow.c	2005-01-18 19:54:57.000000000 +0100
@@ -286,7 +286,7 @@
 	static DECLARE_MUTEX(flow_flush_sem);
 
 	/* Don't want cpus going down or up during this. */
-	lock_cpu_hotplug();
+	disable_cpu_hotplug();
 	down(&flow_flush_sem);
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
@@ -298,7 +298,7 @@
 
 	wait_for_completion(&info.completion);
 	up(&flow_flush_sem);
-	unlock_cpu_hotplug();
+	enable_cpu_hotplug();
 }
 
 static void __devinit flow_cache_cpu_prepare(int cpu)
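
[ The practical effect, condensed from the hunks above (declarations,
  error handling and the per-cpu scan body omitted): rtasd's event scan
  and a cpufreq frequency transition may now both hold the "hotplug
  disabled" state at the same time, where previously whichever came
  second would block in lock_cpu_hotplug() for no benefit.  Neither
  needs to be serialized against the other; both only need
  cpu_up()/cpu_down() fenced out.

	/* Thread A -- arch/ppc64/kernel/rtasd.c: */
	disable_cpu_hotplug();
	for_each_online_cpu(cpu) {
		/* bind to this cpu and run the event scan;
		 * cpu_online_map cannot change under us */
	}
	enable_cpu_hotplug();

	/* Thread B -- __cpufreq_driver_target(), no longer blocked
	 * behind thread A's scan loop: */
	disable_cpu_hotplug();
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	enable_cpu_hotplug();

  Anything that really does need to keep other hotplug-lock holders
  out as well should stay with lock_cpu_hotplug(). ]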
