From: Tejun Heo <tj@kernel.org>
To: jeff@garzik.org, mingo@elte.hu, linux-kernel@vger.kernel.org,
	akpm@linux-foundation.org, jens.axboe@oracle.com,
	rusty@rustcorp.com.au, cl@linux-foundation.org,
	dhowells@redhat.com, arjan@linux.intel.com
Cc: Tejun Heo <tj@kernel.org>
Subject: [PATCH 14/19] workqueue: (TEMPORARY) kill singlethread variant
Date: Thu,  1 Oct 2009 17:09:13 +0900
Message-ID: <1254384558-1018-15-git-send-email-tj@kernel.org>
In-Reply-To: <1254384558-1018-1-git-send-email-tj@kernel.org>

This is incorrect.  There are workqueue users which depend on the
single thread for synchronization purposes.  A proper solution is
being worked on.
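
As a minimal illustration of the kind of user this breaks (all foo_*
names below are hypothetical, not taken from the tree): with a
singlethread workqueue there is only one worker, so the two works can
never run concurrently and the shared counter needs no extra locking.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;
static int foo_state;

/* Relies on the single worker serializing all works queued on foo_wq. */
static void foo_update(struct work_struct *work)
{
	foo_state++;	/* no locking: works on foo_wq never overlap */
}

static DECLARE_WORK(foo_work_a, foo_update);
static DECLARE_WORK(foo_work_b, foo_update);

static int __init foo_init(void)
{
	foo_wq = create_singlethread_workqueue("foo");
	if (!foo_wq)
		return -ENOMEM;
	queue_work(foo_wq, &foo_work_a);
	queue_work(foo_wq, &foo_work_b);
	return 0;
}
module_init(foo_init);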

NOT_SIGNED_OFF
---
 include/linux/workqueue.h |    5 +-
 kernel/workqueue.c        |  128 ++++++++++++---------------------------------
 2 files changed, 36 insertions(+), 97 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 541c5eb..5aa0e15 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -178,7 +178,6 @@ struct execute_work {
 
 enum {
 	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
-	WQ_SINGLE_THREAD	= 1 << 1, /* no per-cpu worker */
 };
 
 extern struct workqueue_struct *
@@ -207,9 +206,9 @@ __create_workqueue_key(const char *name, unsigned int flags,
 #define create_workqueue(name)					\
 	__create_workqueue((name), 0)
 #define create_freezeable_workqueue(name)			\
-	__create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD)
+	__create_workqueue((name), WQ_FREEZEABLE)
 #define create_singlethread_workqueue(name)			\
-	__create_workqueue((name), WQ_SINGLE_THREAD)
+	__create_workqueue((name), 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 39a04ec..6370c9b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,8 +47,7 @@
  */
 
 /*
- * The per-CPU workqueue (if single thread, we always use the first
- * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
+ * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
  * work_struct->data are used for flags and thus cwqs need to be
  * aligned at two's power of the number of flag bits.
  */
@@ -82,34 +81,9 @@ struct workqueue_struct {
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
-static int singlethread_cpu __read_mostly;
-static const struct cpumask *cpu_singlethread_map __read_mostly;
-/*
- * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
- * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
- * which comes in between can't use for_each_online_cpu(). We could
- * use cpu_possible_map, the cpumask below is more a documentation
- * than optimization.
- */
-static cpumask_var_t cpu_populated_map __read_mostly;
-
-/* If it's single threaded, it isn't in the list of workqueues. */
-static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
-{
-	return wq->flags & WQ_SINGLE_THREAD;
-}
-
-static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
-{
-	return is_wq_single_threaded(wq)
-		? cpu_singlethread_map : cpu_populated_map;
-}
-
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 					    struct workqueue_struct *wq)
 {
-	if (unlikely(is_wq_single_threaded(wq)))
-		cpu = singlethread_cpu;
 	return per_cpu_ptr(wq->cpu_wq, cpu);
 }
 
@@ -467,13 +441,12 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void flush_workqueue(struct workqueue_struct *wq)
 {
-	const struct cpumask *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu(cpu, cpu_map)
+	for_each_possible_cpu(cpu)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -587,7 +560,6 @@ static void wait_on_work(struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
-	const struct cpumask *cpu_map;
 	int cpu;
 
 	might_sleep();
@@ -600,9 +572,8 @@ static void wait_on_work(struct work_struct *work)
 		return;
 
 	wq = cwq->wq;
-	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu(cpu, cpu_map)
+	for_each_possible_cpu(cpu)
 		wait_on_cpu_work(get_cwq(cpu, wq), work);
 }
 
@@ -801,26 +772,12 @@ int current_is_keventd(void)
 	return is_sched_workqueue(current);
 }
 
-static struct cpu_workqueue_struct *
-init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
-{
-	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-
-	cwq->wq = wq;
-	spin_lock_init(&cwq->lock);
-	INIT_LIST_HEAD(&cwq->worklist);
-	init_waitqueue_head(&cwq->more_work);
-
-	return cwq;
-}
-
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	struct workqueue_struct *wq = cwq->wq;
-	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
 	struct task_struct *p;
 
-	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
+	p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
 	/*
 	 * Nobody can add the work_struct to this cwq,
 	 *	if (caller is __create_workqueue)
@@ -853,7 +810,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 						const char *lock_name)
 {
 	struct workqueue_struct *wq;
-	struct cpu_workqueue_struct *cwq;
 	int err = 0, cpu;
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
@@ -869,36 +825,36 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);
 
-	if (flags & WQ_SINGLE_THREAD) {
-		cwq = init_cpu_workqueue(wq, singlethread_cpu);
-		err = create_workqueue_thread(cwq, singlethread_cpu);
-		start_workqueue_thread(cwq, -1);
-	} else {
-		cpu_maps_update_begin();
-		/*
-		 * We must place this wq on list even if the code below fails.
-		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
-		 * destroy_workqueue() takes the lock, in that case we leak
-		 * cwq[cpu]->thread.
-		 */
-		spin_lock(&workqueue_lock);
-		list_add(&wq->list, &workqueues);
-		spin_unlock(&workqueue_lock);
-		/*
-		 * We must initialize cwqs for each possible cpu even if we
-		 * are going to call destroy_workqueue() finally. Otherwise
-		 * cpu_up() can hit the uninitialized cwq once we drop the
-		 * lock.
-		 */
-		for_each_possible_cpu(cpu) {
-			cwq = init_cpu_workqueue(wq, cpu);
-			if (err || !cpu_online(cpu))
-				continue;
-			err = create_workqueue_thread(cwq, cpu);
-			start_workqueue_thread(cwq, cpu);
-		}
-		cpu_maps_update_done();
+	cpu_maps_update_begin();
+	/*
+	 * We must place this wq on list even if the code below fails.
+	 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+	 * destroy_workqueue() takes the lock, in that case we leak
+	 * cwq[cpu]->thread.
+	 */
+	spin_lock(&workqueue_lock);
+	list_add(&wq->list, &workqueues);
+	spin_unlock(&workqueue_lock);
+	/*
+	 * We must initialize cwqs for each possible cpu even if we
+	 * are going to call destroy_workqueue() finally. Otherwise
+	 * cpu_up() can hit the uninitialized cwq once we drop the
+	 * lock.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+
+		cwq->wq = wq;
+		spin_lock_init(&cwq->lock);
+		INIT_LIST_HEAD(&cwq->worklist);
+		init_waitqueue_head(&cwq->more_work);
+
+		if (err || !cpu_online(cpu))
+			continue;
+		err = create_workqueue_thread(cwq, cpu);
+		start_workqueue_thread(cwq, cpu);
 	}
+	cpu_maps_update_done();
 
 	if (err) {
 		destroy_workqueue(wq);
@@ -949,7 +905,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	const struct cpumask *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
 	cpu_maps_update_begin();
@@ -957,7 +912,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu(cpu, cpu_map)
+	for_each_possible_cpu(cpu)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
  	cpu_maps_update_done();
 
@@ -977,10 +932,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	action &= ~CPU_TASKS_FROZEN;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-		cpumask_set_cpu(cpu, cpu_populated_map);
-	}
 undo:
 	list_for_each_entry(wq, &workqueues, list) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -1007,12 +958,6 @@ undo:
 		}
 	}
 
-	switch (action) {
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-		cpumask_clear_cpu(cpu, cpu_populated_map);
-	}
-
 	return ret;
 }
 
@@ -1074,11 +1019,6 @@ void __init init_workqueues(void)
 	BUILD_BUG_ON(__alignof__(struct cpu_workqueue_struct) <
 		     __alignof__(unsigned long long));
 
-	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
-
-	cpumask_copy(cpu_populated_map, cpu_online_mask);
-	singlethread_cpu = cpumask_first(cpu_possible_mask);
-	cpu_singlethread_map = cpumask_of(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
-- 
1.6.4.2
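With the header change above, create_singlethread_workqueue() expands
to __create_workqueue((name), 0), i.e. an ordinary per-cpu workqueue,
so works queued from different CPUs may now execute concurrently.
Until the proper solution mentioned in the changelog lands, a user
such as the hypothetical foo driver sketched earlier could keep its
mutual exclusion with an explicit lock (purely illustrative, not
something this patch adds):

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);

static void foo_update(struct work_struct *work)
{
	mutex_lock(&foo_lock);
	foo_state++;
	mutex_unlock(&foo_lock);
}

Note that a lock only restores mutual exclusion; it does not restore
the strict execution order that a single worker thread provided.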



Thread overview: 90+ messages
2009-10-01  8:08 [RFC PATCHSET] workqueue: implement concurrency managed workqueue Tejun Heo
2009-10-01  8:09 ` [PATCH 01/19] freezer: don't get over-anxious while waiting Tejun Heo
2009-10-01 18:36   ` Pavel Machek
2009-10-01 21:04     ` Rafael J. Wysocki
2009-10-02 10:56       ` Tejun Heo
2009-10-02 10:56         ` Tejun Heo
2009-10-02 19:47       ` Oren Laadan
2009-10-02 19:47       ` Oren Laadan
2009-10-02 21:04         ` Matt Helsley
2009-10-02 21:21           ` Rafael J. Wysocki
2009-10-02 21:21           ` Rafael J. Wysocki
2009-10-03  0:43             ` Tejun Heo
2009-10-03  0:43             ` Tejun Heo
2009-10-03 19:36               ` Rafael J. Wysocki
2009-10-03 19:36                 ` Rafael J. Wysocki
2009-10-02 21:04         ` Matt Helsley
2009-10-01 21:04     ` Rafael J. Wysocki
2009-10-01  8:09 ` [PATCH 02/19] scheduler: implement sched_class_equal() Tejun Heo
2009-10-01  8:09 ` [PATCH 03/19] scheduler: implement workqueue scheduler class Tejun Heo
2009-10-01 16:57   ` Linus Torvalds
2009-10-01 18:48     ` Ingo Molnar
2009-10-01 19:00       ` Avi Kivity
2009-10-01 19:11         ` Linus Torvalds
2009-10-01 19:23           ` Ingo Molnar
2009-10-01 20:03             ` Linus Torvalds
2009-10-01 19:15         ` Ingo Molnar
2009-10-01 19:06       ` Linus Torvalds
2009-10-02 12:23     ` Tejun Heo
2009-10-01  8:09 ` [PATCH 04/19] scheduler: implement force_cpus_allowed_ptr() Tejun Heo
2009-10-01  8:09 ` [PATCH 05/19] kthread: implement kthread_data() Tejun Heo
2009-10-01  8:09 ` [PATCH 06/19] acpi: use queue_work_on() instead of binding workqueue worker to cpu0 Tejun Heo
2009-10-01  8:09 ` [PATCH 07/19] stop_machine: reimplement without using workqueue Tejun Heo
2009-10-06  9:36   ` Rusty Russell
2009-10-06 23:42     ` Tejun Heo
2009-10-01  8:09 ` [PATCH 08/19] workqueue: misc/cosmetic updates Tejun Heo
2009-10-01  8:09 ` [PATCH 09/19] workqueue: merge feature parametesr into flags Tejun Heo
2009-10-01  8:09 ` [PATCH 10/19] workqueue: update cwq alignement and make one more flag bit available Tejun Heo
2009-10-01  8:09 ` [PATCH 11/19] workqueue: define both bit position and mask for work flags Tejun Heo
2009-10-01  8:09 ` [PATCH 12/19] workqueue: separate out process_one_work() Tejun Heo
2009-10-01  8:09 ` [PATCH 13/19] workqueue: temporarily disable workqueue tracing Tejun Heo
2009-10-01  8:09 ` Tejun Heo [this message]
2009-10-01  8:09 ` [PATCH 15/19] workqueue: reimplement workqueue flushing using color coded works Tejun Heo
2009-10-01 17:03   ` Linus Torvalds
2009-10-01 17:11     ` Tejun Heo
2009-10-01 17:16       ` Tejun Heo
2009-10-01  8:09 ` [PATCH 16/19] workqueue: introduce worker Tejun Heo
2009-10-01  8:09 ` [PATCH 17/19] workqueue: reimplement work flushing using linked works Tejun Heo
2009-10-01  8:09 ` [PATCH 18/19] workqueue: reimplement workqueue freeze using cwq->frozen_works queue Tejun Heo
2009-10-01  8:09 ` [PATCH 19/19] workqueue: implement concurrency managed workqueue Tejun Heo
2009-10-01 14:49   ` Andrew Morton
2009-10-01 15:12     ` Ingo Molnar
2009-10-01 16:34     ` Tejun Heo
2009-10-04  8:49     ` Peter Zijlstra
2009-10-01 17:12   ` Linus Torvalds
2009-10-01 17:22     ` Tejun Heo
2009-10-01 17:55       ` Linus Torvalds
2009-10-02  0:42   ` Andi Kleen
2009-10-02 12:09     ` Tejun Heo
2009-10-03  2:59       ` Andi Kleen
2009-10-02 14:28   ` Frédéric Weisbecker
2009-10-01  8:24 ` [RFC PATCHSET] " Jens Axboe
2009-10-01 16:36   ` Tejun Heo
2009-10-01  8:24 ` Jens Axboe
2009-10-01 16:25   ` Tejun Heo
2009-10-01  8:40 ` Ingo Molnar
2009-10-01  8:47   ` Jens Axboe
2009-10-01  9:01     ` Ingo Molnar
2009-10-01  9:05       ` Jens Axboe
2009-10-01  9:11   ` Avi Kivity
2009-10-01  9:22     ` Avi Kivity
2009-10-01 16:55     ` Tejun Heo
2009-10-01 17:06       ` Avi Kivity
2009-10-01 16:43   ` Tejun Heo
2009-10-01 12:53 ` David Howells
2009-10-02 11:44   ` Tejun Heo
2009-10-02 12:45     ` Stefan Richter
2009-10-02 15:38   ` David Howells
2009-10-03  5:07     ` Tejun Heo
2009-10-01 12:57 ` [PATCH 06/19] acpi: use queue_work_on() instead of binding workqueue worker to cpu0 David Howells
2009-10-01 17:07   ` Tejun Heo
2009-10-01 13:05 ` [PATCH 10/19] workqueue: update cwq alignement and make one more flag bit available David Howells
2009-10-01 16:15   ` Jeff Garzik
2009-10-01 16:20   ` David Howells
2009-10-01 16:30     ` Tejun Heo
2009-10-01 16:39   ` Alan Cox
2009-10-01 18:45   ` Ben Pfaff
2009-10-02 11:56     ` Tejun Heo
2009-10-01 13:15 ` [PATCH 19/19] workqueue: implement concurrency managed workqueue David Howells
2009-10-02 12:03   ` Tejun Heo
2009-10-04  8:41 ` [RFC PATCHSET] " Peter Zijlstra
