From: tip-bot for Tim Chen <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, hpa@zytor.com, mingo@kernel.org,
	torvalds@linux-foundation.org, peterz@infradead.org,
	tim.c.chen@linux.intel.com, paulmck@linux.vnet.ibm.com,
	akpm@linux-foundation.org, tglx@linutronix.de, davidlohr@hp.com
Subject: [tip:core/locking] locking/mutexes/mcs: Restructure the MCS lock defines and locking code into its own file
Date: Tue, 28 Jan 2014 11:22:56 -0800
Message-ID: <tip-e72246748ff006ab928bc774e276e6ef5542f9c5@git.kernel.org>
In-Reply-To: <1390347360.3138.63.camel@schen9-DESK>

Commit-ID:  e72246748ff006ab928bc774e276e6ef5542f9c5
Gitweb:     http://git.kernel.org/tip/e72246748ff006ab928bc774e276e6ef5542f9c5
Author:     Tim Chen <tim.c.chen@linux.intel.com>
AuthorDate: Tue, 21 Jan 2014 15:36:00 -0800
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 Jan 2014 13:13:27 +0100

locking/mutexes/mcs: Restructure the MCS lock defines and locking code into its own file

We will need the MCS lock code for doing optimistic spinning for rwsem
and the queued rwlock.  Extracting the MCS code from mutex.c and putting
it into its own file allows us to reuse this code easily.

We also inline the mcs_spin_lock() and mcs_spin_unlock() functions
for better efficiency.

Note that the smp_load_acquire()/smp_store_release() pair used in
mcs_lock and mcs_unlock is not sufficient to form a full memory barrier
across CPUs on many architectures (x86 being an exception).  For
applications that absolutely need a full barrier across multiple CPUs
around an mcs_unlock/mcs_lock pair, smp_mb__after_unlock_lock() should
be used after mcs_lock.

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1390347360.3138.63.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/mcs_spinlock.h | 77 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mutex.h        |  5 +--
 kernel/locking/mutex.c       | 68 ++++----------------------------------
 3 files changed, 87 insertions(+), 63 deletions(-)

diff --git a/include/linux/mcs_spinlock.h b/include/linux/mcs_spinlock.h
new file mode 100644
index 0000000..9578ef8
--- /dev/null
+++ b/include/linux/mcs_spinlock.h
@@ -0,0 +1,77 @@
+/*
+ * MCS lock defines
+ *
+ * This file contains the main data structure and API definitions of MCS lock.
+ *
+ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
+ * with the desirable properties of being fair and of having each CPU that
+ * is trying to acquire the lock spin on a local variable.
+ * It avoids the expensive cache-line bouncing that common test-and-set
+ * spin-lock implementations incur.
+ */
+#ifndef __LINUX_MCS_SPINLOCK_H
+#define __LINUX_MCS_SPINLOCK_H
+
+struct mcs_spinlock {
+	struct mcs_spinlock *next;
+	int locked; /* 1 if lock acquired */
+};
+
+/*
+ * Note: the smp_load_acquire()/smp_store_release() pair is not
+ * sufficient to form a full memory barrier across CPUs on many
+ * architectures (except x86) for mcs_unlock and mcs_lock.
+ * For applications that need a full barrier across multiple CPUs
+ * with an mcs_unlock/mcs_lock pair, smp_mb__after_unlock_lock()
+ * should be used after mcs_lock.
+ */
+static inline
+void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *prev;
+
+	/* Init node */
+	node->locked = 0;
+	node->next   = NULL;
+
+	prev = xchg(lock, node);
+	if (likely(prev == NULL)) {
+		/* Lock acquired */
+		node->locked = 1;
+		return;
+	}
+	ACCESS_ONCE(prev->next) = node;
+	/*
+	 * Wait until the lock holder passes the lock down.
+	 * Using smp_load_acquire() provides a memory barrier that
+	 * ensures subsequent operations happen after the lock is acquired.
+	 */
+	while (!(smp_load_acquire(&node->locked)))
+		arch_mutex_cpu_relax();
+}
+
+static inline
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+	if (likely(!next)) {
+		/*
+		 * Release the lock by setting it to NULL
+		 */
+		if (cmpxchg(lock, node, NULL) == node)
+			return;
+		/* Wait until the next pointer is set */
+		while (!(next = ACCESS_ONCE(node->next)))
+			arch_mutex_cpu_relax();
+	}
+	/*
+	 * Pass lock to next waiter.
+	 * smp_store_release() provides a memory barrier to ensure
+	 * all operations in the critical section have been completed
+	 * before unlocking.
+	 */
+	smp_store_release(&next->locked, 1);
+}
+
+#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index d318193..c482e1d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -46,6 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
+struct mcs_spinlock;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -55,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	void			*spin_mlock;	/* Spinner MCS lock */
+	struct mcs_spinlock	*mcs_lock;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char 		*name;
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 # define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
-#endif
+#endif /* __LINUX_MUTEX_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index fbbd2ed..45fe1b5 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include <linux/mcs_spinlock.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -52,7 +53,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->spin_mlock = NULL;
+	lock->mcs_lock = NULL;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -111,62 +112,7 @@ EXPORT_SYMBOL(mutex_lock);
  * more or less simultaneously, the spinners need to acquire a MCS lock
  * first before spinning on the owner field.
  *
- * We don't inline mspin_lock() so that perf can correctly account for the
- * time spent in this lock function.
  */
-struct mspin_node {
-	struct mspin_node *next ;
-	int		  locked;	/* 1 if lock acquired */
-};
-#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
-
-static noinline
-void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
-{
-	struct mspin_node *prev;
-
-	/* Init node */
-	node->locked = 0;
-	node->next   = NULL;
-
-	prev = xchg(lock, node);
-	if (likely(prev == NULL)) {
-		/* Lock acquired */
-		node->locked = 1;
-		return;
-	}
-	ACCESS_ONCE(prev->next) = node;
-	/*
-	 * Wait until the lock holder passes the lock down.
-	 * Using smp_load_acquire() provides a memory barrier that
-	 * ensures subsequent operations happen after the lock is acquired.
-	 */
-	while (!(smp_load_acquire(&node->locked)))
-		arch_mutex_cpu_relax();
-}
-
-static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
-{
-	struct mspin_node *next = ACCESS_ONCE(node->next);
-
-	if (likely(!next)) {
-		/*
-		 * Release the lock by setting it to NULL
-		 */
-		if (cmpxchg(lock, node, NULL) == node)
-			return;
-		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
-	}
-	/*
-	 * Pass lock to next waiter.
-	 * smp_store_release() provides a memory barrier to ensure
-	 * all operations in the critical section has been completed
-	 * before unlocking.
-	 */
-	smp_store_release(&next->locked, 1);
-}
 
 /*
  * Mutex spinning code migrated from kernel/sched/core.c
@@ -456,7 +402,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	for (;;) {
 		struct task_struct *owner;
-		struct mspin_node  node;
+		struct mcs_spinlock  node;
 
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
@@ -478,10 +424,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		mspin_lock(MLOCK(lock), &node);
+		mcs_spin_lock(&lock->mcs_lock, &node);
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
-			mspin_unlock(MLOCK(lock), &node);
+			mcs_spin_unlock(&lock->mcs_lock, &node);
 			goto slowpath;
 		}
 
@@ -496,11 +442,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			}
 
 			mutex_set_owner(lock);
-			mspin_unlock(MLOCK(lock), &node);
+			mcs_spin_unlock(&lock->mcs_lock, &node);
 			preempt_enable();
 			return 0;
 		}
-		mspin_unlock(MLOCK(lock), &node);
+		mcs_spin_unlock(&lock->mcs_lock, &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the

Thread overview: 46+ messages

     [not found] <cover.1390320729.git.tim.c.chen@linux.intel.com>
2014-01-21 23:35 ` [PATCH v9 0/6] MCS Lock: MCS lock code cleanup and optimizations Tim Chen
2014-01-21 23:35 ` [PATCH v9 1/6] MCS Lock: Barrier corrections Tim Chen
2014-01-22 10:13   ` Will Deacon
2014-01-28 19:22   ` [tip:core/locking] locking/mutexes/mcs: Correct barrier usage tip-bot for Waiman Long
2014-01-21 23:36 ` [PATCH v9 2/6] MCS Lock: Restructure the MCS lock defines and locking code into its own file Tim Chen
2014-01-28 19:22   ` tip-bot for Tim Chen [this message]
2014-01-21 23:36 ` [PATCH v9 3/6] MCS Lock: Optimizations and extra comments Tim Chen
2014-01-28 19:23   ` [tip:core/locking] locking/mcs: Micro-optimize the MCS code, add extra comments tip-bot for Jason Low
2014-01-21 23:36 ` [PATCH v9 4/6] MCS Lock: Allow architectures to hook in to contended paths Tim Chen
2014-01-28 19:23   ` [tip:core/locking] locking/mcs: Allow architectures to hook in to contended paths tip-bot for Will Deacon
2014-01-21 23:36 ` [PATCH v9 5/6] MCS Lock: Order the header files in Kbuild of each architecture in alphabetical order Tim Chen
2014-01-22 13:08   ` Peter Zijlstra
2014-01-22 17:41     ` Tim Chen
2014-01-22 17:51       ` Peter Zijlstra
2014-01-22 18:18     ` Peter Zijlstra
2014-01-22 18:25     ` Peter Zijlstra
2014-01-22 18:41       ` Tim Chen
2014-01-21 23:36 ` [PATCH v9 6/6] MCS Lock: Allow architecture specific asm files to be used for contended case Tim Chen
2014-01-22 18:35   ` Peter Zijlstra
2014-01-24 17:03     ` Tim Chen
2014-01-24 17:07       ` Peter Zijlstra
2014-01-24 17:33         ` Linus Torvalds
2014-01-24 17:37           ` Will Deacon
2014-02-10 13:32   ` [tip:core/locking] locking/mcs: Allow architecture specific asm files to be used for contended case tip-bot for Tim Chen
