From: Lai Jiangshan <laijs@cn.fujitsu.com>
To: Tejun Heo <tj@kernel.org>, linux-kernel@vger.kernel.org
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Subject: [PATCH 13/21] workqueue: remove unused pwq_lock
Date: Wed, 20 Mar 2013 03:28:13 +0800	[thread overview]
Message-ID: <1363721306-2030-14-git-send-email-laijs@cn.fujitsu.com> (raw)
In-Reply-To: <1363721306-2030-1-git-send-email-laijs@cn.fujitsu.com>

All first_pwq() and for_each_pwq() users are now protected by
wq->mutex or sched-RCU, so pwq_lock no longer protects anything.

Simply remove it.
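
For illustration only (not part of the patch): a minimal sketch of the two
reader-side patterns for_each_pwq() now relies on once pwq_lock is gone.
Only for_each_pwq(), wq->mutex and the sched-RCU primitives are real; the
inspect_pwq()/adjust_pwq() helpers are hypothetical placeholders.

	/* Short, non-sleeping traversal: sched-RCU read side suffices. */
	rcu_read_lock_sched();
	for_each_pwq(pwq, wq)
		inspect_pwq(pwq);	/* hypothetical helper, must not sleep */
	rcu_read_unlock_sched();

	/* Traversal that needs the pwq list to stay stable (and may sleep):
	 * hold wq->mutex instead. */
	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq)
		adjust_pwq(pwq);	/* hypothetical helper */
	mutex_unlock(&wq->mutex);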

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
---
 kernel/workqueue.c |   31 +++++++++++--------------------
 1 files changed, 11 insertions(+), 20 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a3460e7..e45f038 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -127,12 +127,9 @@ enum {
  *
  * PR: pools_mutex protected for writes.  Sched-RCU protected for reads.
  *
- * PW: pwq_lock protected.
- *
  * Q: wq->mutex protected.
  *
- * QR: wq->mutex and pwq_lock protected for writes.  Sched-RCU
- *     protected for reads.
+ * QR: wq->mutex protected for writes.  Sched-RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -206,7 +203,7 @@ struct pool_workqueue {
 	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
 	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
 	 * itself is also sched-RCU protected so that the first pwq can be
-	 * determined without grabbing pwq_lock.
+	 * determined without grabbing wq->mutex.
 	 */
 	struct work_struct	unbound_release_work;
 	struct rcu_head		rcu;
@@ -260,7 +257,6 @@ static struct kmem_cache *pwq_cache;
 
 static DEFINE_MUTEX(wqs_mutex);		/* protects workqueues */
 static DEFINE_MUTEX(pools_mutex);	/* protects pools */
-static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 
 static LIST_HEAD(workqueues);		/* QS: list of all workqueues */
@@ -301,11 +297,10 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			   lockdep_is_held(&pools_mutex),		\
 			   "sched RCU or pools_mutex should be held")
 
-#define assert_rcu_or_pwq_lock(wq)					\
+#define assert_rcu_or_wq_mutex(wq)					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq->mutex) ||		\
-			   lockdep_is_held(&pwq_lock),			\
-			   "sched RCU or pwq_lock should be held")
+			   lockdep_is_held(&wq->mutex),			\
+			   "sched RCU or wq->mutex should be held")
 
 #ifdef CONFIG_LOCKDEP
 #define assert_manager_or_pool_lock(pool)				\
@@ -359,7 +354,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  *
@@ -368,7 +363,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  */
 #define for_each_pwq(pwq, wq)						\
 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
-		if (({ assert_rcu_or_pwq_lock(wq); false; })) { }		\
+		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
 		else
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -507,13 +502,13 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * first_pwq - return the first pool_workqueue of the specified workqueue
  * @wq: the target workqueue
  *
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  */
 static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	assert_rcu_or_pwq_lock(wq);
+	assert_rcu_or_wq_mutex(wq);
 	return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
 				      pwqs_node);
 }
@@ -3551,9 +3546,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * and consistent with the linking path.
 	 */
 	mutex_lock(&wq->mutex);
-	spin_lock_irq(&pwq_lock);
 	list_del_rcu(&pwq->pwqs_node);
-	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->mutex);
 
 	put_unbound_pool(pool);
@@ -3639,9 +3632,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	pwq_adjust_max_active(pwq);
 
 	/* link in @pwq */
-	spin_lock_irq(&pwq_lock);
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
-	spin_unlock_irq(&pwq_lock);
 
 	mutex_unlock(&wq->mutex);
 }
@@ -4290,7 +4281,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases wqs_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wqs_mutex, wq->mutex and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -4377,7 +4368,7 @@ out_unlock:
  * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
- * Grabs and releases wqs_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wqs_mutex, wq->mutex and pool->lock's.
  */
 void thaw_workqueues(void)
 {
-- 
1.7.7.6



Thread overview: 39+ messages
2013-03-19 19:28 [PATCH 00/21] workqueue: cleanups and better locking for recent changes Lai Jiangshan
2013-03-19 19:28 ` [PATCH 01/21] workqueue: add missing POOL_FREEZING Lai Jiangshan
2013-03-20 17:18   ` Tejun Heo
2013-03-20 17:24   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 02/21] workqueue: don't free pool->worker_idr by RCU Lai Jiangshan
2013-03-20 17:29   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 03/21] workqueue: simplify current_is_workqueue_rescuer() Lai Jiangshan
2013-03-20 17:33   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 04/21] workqueue: swap the two branches in pwq_adjust_max_active() to get better readability Lai Jiangshan
2013-03-20 17:39   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 05/21] workqueue: kick workers in pwq_adjust_max_active() Lai Jiangshan
2013-03-20 17:56   ` [PATCH] workqueue: kick a worker " Tejun Heo
2013-03-19 19:28 ` [PATCH 06/21] workqueue: separate out pools locking into pools_mutex Lai Jiangshan
2013-03-20 17:58   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 07/21] workqueue: rename wq_mutex to wqs_mutex Lai Jiangshan
2013-03-19 19:28 ` [PATCH 08/21] workqueue: rename wq->flush_mutex to wq->mutex Lai Jiangshan
2013-03-19 19:28 ` [PATCH 09/21] workqueue: use wq->mutex to protects ->nr_drainers and __WQ_DRAINING Lai Jiangshan
2013-03-19 19:28 ` [PATCH 10/21] workqueue: use rcu_read_lock_sched() instead for accessing pwq in RCU Lai Jiangshan
2013-03-20 18:01   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 11/21] workqueue: also allowed wq->mutex protect for_each_pwq() Lai Jiangshan
2013-03-19 19:28 ` [PATCH 12/21] workqueue: use wq->mutex to protect saved_max_active Lai Jiangshan
2013-03-19 19:28 ` Lai Jiangshan [this message]
2013-03-19 19:28 ` [PATCH 14/21] workqueue: add wq->freezing and remove POOL_FREEZING Lai Jiangshan
2013-03-19 19:28 ` [PATCH 15/21] workqueue: remove worker_maybe_bind_and_lock() Lai Jiangshan
2013-03-20 18:10   ` Tejun Heo
2013-03-21 11:03     ` Lai Jiangshan
2013-03-21 17:41       ` Tejun Heo
2013-03-19 19:28 ` [PATCH 16/21] workqueue: rename rebind_workers() to associate_cpu_pool() Lai Jiangshan
2013-03-19 19:28 ` [PATCH 17/21] workqueue: simplify workqueue_cpu_up_callback(CPU_ONLINE) Lai Jiangshan
2013-03-20 18:16   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 18/21] workqueue: read POOL_DISASSOCIATED bit under pool->lock Lai Jiangshan
2013-03-20 18:19   ` Tejun Heo
2013-03-19 19:28 ` [PATCH 19/21] workqueue: remove @p_last_pwq from init_and_link_pwq() Lai Jiangshan
2013-03-19 19:28 ` [PATCH 20/21] workqueue: modify wq->freezing only when freezable Lai Jiangshan
2013-03-19 19:28 ` [PATCH 21/21] workqueue: avoid false negative in assert_manager_or_pool_lock() Lai Jiangshan
2013-03-20 18:22   ` Tejun Heo
2013-03-20 16:38 ` [PATCH 00/21] workqueue: cleanups and better locking for recent changes Lai Jiangshan
2013-03-20 16:40   ` Tejun Heo
2013-03-20 18:30 ` Tejun Heo
