* [PATCH v2 0/3] locking/spinlock_debug: Change it to a mostly fair lock
@ 2017-02-01 21:13 Waiman Long
  2017-02-01 21:13 ` [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter Waiman Long
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Waiman Long @ 2017-02-01 21:13 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel, Waiman Long

 v1->v2:
  - Pack lockup and break_lock into a single 4-byte slot so as not
    to increase the spinlock size when GENERIC_LOCKBREAK is on.
    Hopefully that will be enough to fix the "frame size too large"
    warning in the 0-day build.
  - Add a new patch to disable GENERIC_LOCKBREAK when DEBUG_LOCK_ALLOC
    is on.

The current debug spinlock implementation is a TATAS
(test-and-test-and-set) unfair lock. This can occasionally lead to a
system lockup with a debug kernel because of the unfairness of the
lock rather than an inherent locking problem.

This patch set changes the debug spinlock implementation to a mostly
fair spinlock based on the MCS lock, similar to what is done in
qspinlock.
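
As background, a TATAS lock is essentially the sketch below
(illustrative only, using generic kernel atomics rather than the
actual debug-spinlock code):

  /*
   * TATAS sketch: spin reading until the lock looks free, then race
   * on an atomic swap.  Release picks no particular winner, so the
   * same nearby CPU can keep winning while remote CPUs starve --
   * the unfairness that can masquerade as a lockup.
   */
  static void tatas_lock(atomic_t *lock)
  {
          while (atomic_read(lock) ||     /* test                 */
                 atomic_xchg(lock, 1))    /* ... and test-and-set */
                  cpu_relax();
  }

  static void tatas_unlock(atomic_t *lock)
  {
          atomic_set(lock, 0);
  }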

Waiman Long (3):
  locking/spinlock_debug: Reduce lockup suspected message clutter
  locking/spinlock_debug: Reduce lock cacheline contention
  locking/spinlock: Disable GENERIC_LOCKBREAK when DEBUG_LOCK_ALLOC is
    on

 arch/m32r/Kconfig               |  2 +-
 arch/parisc/Kconfig             |  2 +-
 arch/powerpc/Kconfig            |  2 +-
 arch/s390/Kconfig               |  2 +-
 arch/sh/Kconfig                 |  2 +-
 arch/sparc/Kconfig              |  2 +-
 include/linux/spinlock_types.h  |  9 +++--
 kernel/locking/spinlock_debug.c | 73 ++++++++++++++++++++++++++++++-----------
 8 files changed, 65 insertions(+), 29 deletions(-)

-- 
1.8.3.1

* [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter
  2017-02-01 21:13 [PATCH v2 0/3] locking/spinlock_debug: Change it to a mostly fair lock Waiman Long
@ 2017-02-01 21:13 ` Waiman Long
  2017-02-02  2:21   ` kbuild test robot
  2017-02-01 21:13 ` [PATCH v2 2/3] locking/spinlock_debug: Reduce lock cacheline contention Waiman Long
  2017-02-01 21:13 ` [PATCH v2 3/3] locking/spinlock: Disable GENERIC_LOCKBREAK when DEBUG_LOCK_ALLOC is on Waiman Long
  2 siblings, 1 reply; 5+ messages in thread
From: Waiman Long @ 2017-02-01 21:13 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel, Waiman Long

When the debug spinlock code detects a lockup, it prints an error
message as well as the backtraces of all the CPUs. However, if more
than one CPU is waiting on the lock, multiple lockup messages will be
printed, leading to garbled output.

To reduce clutter in the console log, only one of the lock waiters is
now allowed to print out the CPU backtraces.
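
Concretely, the gate is a single atomic claim on the new flag,
matching the hunk below:

  if (!xchg(&lock->lockup, 1)) {          /* first to flip 0 -> 1 wins */
          spin_dump(lock, "lockup suspected");
          trigger_all_cpu_backtrace();    /* SMP only in the real code */
  }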

Since break_lock, like lockup, can only have a value of 0 or 1, its
size is now reduced so that on a 64-bit architecture, the size of the
raw_spinlock structure won't increase whether CONFIG_GENERIC_LOCKBREAK
is defined or not.
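
For illustration, assuming both config options are enabled and a
4-byte arch_spinlock_t (this varies by architecture), the intended
layout is the following sketch, not the actual header:

  /*
   * raw_spinlock layout sketch (64-bit, CONFIG_GENERIC_LOCKBREAK +
   * CONFIG_DEBUG_SPINLOCK):
   *
   *   arch_spinlock_t raw_lock;      4 bytes
   *   unsigned short  break_lock;    2 bytes \  both fit in the 4-byte
   *   unsigned short  lockup;        2 bytes /  slot break_lock had alone
   *   unsigned int    magic, owner_cpu;
   *   void           *owner;
   */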

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/linux/spinlock_types.h  |  4 +++-
 kernel/locking/spinlock_debug.c | 26 +++++++++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb..99f28bd 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -20,9 +20,10 @@
 typedef struct raw_spinlock {
 	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
+	unsigned short break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned short lockup;
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
@@ -43,6 +44,7 @@
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define SPIN_DEBUG_INIT(lockname)		\
+	.lockup = 0,				\
 	.magic = SPINLOCK_MAGIC,		\
 	.owner_cpu = -1,			\
 	.owner = SPINLOCK_OWNER_INIT,
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a59..0f880a8 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -27,6 +27,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
+	lock->lockup = 0;
 }
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -101,6 +102,24 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 							lock, "wrong CPU");
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
+	lock->lockup = 0;
+}
+
+static inline void __spin_lockup(raw_spinlock_t *lock)
+{
+	/*
+	 * lockup suspected:
+	 *
+	 * Only one of the lock waiters will be allowed to print the lockup
+	 * message in order to avoid an avalanche of lockup and backtrace
+	 * messages from different lock waiters of the same lock.
+	 */
+	if (!xchg(&lock->lockup, 1)) {
+		spin_dump(lock, "lockup suspected");
+#ifdef CONFIG_SMP
+		trigger_all_cpu_backtrace();
+#endif
+	}
 }
 
 static void __spin_lock_debug(raw_spinlock_t *lock)
@@ -113,11 +132,8 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 			return;
 		__delay(1);
 	}
-	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
+
+	__spin_lockup(lock);
 
 	/*
 	 * The trylock above was causing a livelock.  Give the lower level arch
-- 
1.8.3.1

* [PATCH v2 2/3] locking/spinlock_debug: Reduce lock cacheline contention
  2017-02-01 21:13 [PATCH v2 0/3] locking/spinlock_debug: Change it to a mostly fair lock Waiman Long
  2017-02-01 21:13 ` [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter Waiman Long
@ 2017-02-01 21:13 ` Waiman Long
  2017-02-01 21:13 ` [PATCH v2 3/3] locking/spinlock: Disable GENERIC_LOCKBREAK when DEBUG_LOCK_ALLOC is on Waiman Long
  2 siblings, 0 replies; 5+ messages in thread
From: Waiman Long @ 2017-02-01 21:13 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel, Waiman Long

The debug spinlock code is a basic TATAS unfair lock irrespective of
what the underlying architecture-specific spinlock implementation is.
As a result, it is sometimes possible to trigger a false positive
"lockup suspected" warning, complete with backtraces from all CPUs.

This patch re-implements the debug spinlock as a fair MCS lock. This
reduces the chance of false positive warning messages. At the
same time, it also improves performance by reducing lock cacheline
contention.

Because there is a trylock before entering the MCS queue, this new
debug spinlock code also performs reasonably well in a virtual machine
even if its vCPUs are over-committed.
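
For reference, the MCS queuing discipline used here is roughly the
following (a simplified sketch with hypothetical helper names; in the
patch itself this logic is folded into __spin_lock_debug() with the
lockup check in the spin loops):

  struct mcs_node {
          struct mcs_node *next;
          int locked;
  };

  static void mcs_queue_wait(struct mcs_node **tail, struct mcs_node *node)
  {
          struct mcs_node *prev;

          node->next = NULL;
          node->locked = 0;
          prev = xchg(tail, node);        /* atomically join the queue */
          if (prev) {                     /* a waiter is ahead of us   */
                  WRITE_ONCE(prev->next, node);
                  while (!READ_ONCE(node->locked))
                          cpu_relax();    /* spin on our own node only */
          }
  }

  static void mcs_queue_exit(struct mcs_node **tail, struct mcs_node *node)
  {
          /* No successor?  Reset the queue to empty and leave. */
          if (cmpxchg(tail, node, NULL) == node)
                  return;
          /* A successor is enqueueing; wait for its next pointer. */
          while (!READ_ONCE(node->next))
                  cpu_relax();
          WRITE_ONCE(node->next->locked, 1);  /* hand the head position off */
  }

Each waiter spins on a flag in its own stack-local node, so waiters are
served in FIFO order and the spinning stays off the shared lock
cacheline.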

On a 4-socket 32-core 64-thread system, the performance of a locking
microbenchmark (locking rate and standard deviation) on a 4.9.6 based
debug kernel with and without the patch was as follows:

  32 locking threads:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch     263.1 Mop/s         1.39s
  with patch    917.6 Mop/s         0.07s

  64 locking threads:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch     368.3 Mop/s         6.88s
  with patch    733.0 Mop/s         0.09s

On a 2-socket 24-core 48-thread system, the performance of the same
locking microbenchmark (# of locking threads = # of vCPUs) on a KVM
guest was as follows:

  24 vCPUs:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch     746.4 Mop/s         1.07s
  with patch   1323.6 Mop/s         0.20s

  48 vCPUs:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch    1077.8 Mop/s         3.34s
  with patch   1090.4 Mop/s         0.29s

  72 vCPUs:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch     944.5 Mop/s         3.96s
  with patch   1176.7 Mop/s         0.44s

  96 vCPUs:

  Kernel       Locking Rate    SD (execution time)
  ------       ------------    -------------------
  w/o patch     878.0 Mop/s         5.19s
  with patch   1017.0 Mop/s         0.83s

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/linux/spinlock_types.h  |  5 ++--
 kernel/locking/spinlock_debug.c | 53 +++++++++++++++++++++++++++--------------
 2 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 99f28bd..562af2d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -25,7 +25,7 @@
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned short lockup;
 	unsigned int magic, owner_cpu;
-	void *owner;
+	void *owner, *tail;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -47,7 +47,8 @@
 	.lockup = 0,				\
 	.magic = SPINLOCK_MAGIC,		\
 	.owner_cpu = -1,			\
-	.owner = SPINLOCK_OWNER_INIT,
+	.owner = SPINLOCK_OWNER_INIT,		\
+	.tail = NULL,
 #else
 # define SPIN_DEBUG_INIT(lockname)
 #endif
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0f880a8..c58b61f 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -12,6 +12,7 @@
 #include <linux/debug_locks.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include "mcs_spinlock.h"
 
 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			  struct lock_class_key *key)
@@ -26,6 +27,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->tail = NULL;
 	lock->owner_cpu = -1;
 	lock->lockup = 0;
 }
@@ -105,7 +107,7 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	lock->lockup = 0;
 }
 
-static inline void __spin_lockup(raw_spinlock_t *lock)
+static inline void __spin_chk_lockup(raw_spinlock_t *lock, u64 loops)
 {
 	/*
 	 * lockup suspected:
@@ -113,37 +115,52 @@ static inline void __spin_lockup(raw_spinlock_t *lock)
 	 * Only one of the lock waiters will be allowed to print the lockup
 	 * message in order to avoid an avalanche of lockup and backtrace
 	 * messages from different lock waiters of the same lock.
+	 *
+	 * With the original __delay(1) call, lockup can happen when both
+	 * threads of a hyperthreaded CPU core contend on the same lock. So
+	 * cpu_relax() is used here instead.
 	 */
-	if (!xchg(&lock->lockup, 1)) {
+	if (unlikely(!loops && !xchg(&lock->lockup, 1))) {
 		spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
 		trigger_all_cpu_backtrace();
 #endif
 	}
+	cpu_relax();
 }
 
+/*
+ * The lock waiters are put into an MCS queue to maintain lock fairness
+ * as well as to avoid excessive contention on the lock cacheline. It
+ * also helps to reduce false positives caused by unfairness rather
+ * than by real lockups.
+ *
+ * The trylock before entering the MCS queue makes this code perform
+ * reasonably well in a virtual machine where some of the lock waiters
+ * may have their vCPUs preempted.
+ */
 static void __spin_lock_debug(raw_spinlock_t *lock)
 {
-	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
-
-	for (i = 0; i < loops; i++) {
-		if (arch_spin_trylock(&lock->raw_lock))
-			return;
-		__delay(1);
+	struct mcs_spinlock node, *prev;
+
+	node.next = NULL;
+	node.locked = 0;
+	prev = xchg(&lock->tail, &node);
+	if (prev) {
+		WRITE_ONCE(prev->next, &node);
+		while (!READ_ONCE(node.locked))
+			__spin_chk_lockup(lock, loops--);
 	}
 
-	__spin_lockup(lock);
+	while (!arch_spin_trylock(&lock->raw_lock))
+		__spin_chk_lockup(lock, loops--);
 
-	/*
-	 * The trylock above was causing a livelock.  Give the lower level arch
-	 * specific lock code a chance to acquire the lock. We have already
-	 * printed a warning/backtrace at this point. The non-debug arch
-	 * specific code might actually succeed in acquiring the lock.  If it is
-	 * not successful, the end-result is the same - there is no forward
-	 * progress.
-	 */
-	arch_spin_lock(&lock->raw_lock);
+	if (cmpxchg(&lock->tail, &node, NULL) == &node)
+		return;
+	while (!READ_ONCE(node.next))
+		cpu_relax();
+	WRITE_ONCE(node.next->locked, 1);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
-- 
1.8.3.1

* [PATCH v2 3/3] locking/spinlock: Disable GENERIC_LOCKBREAK when DEBUG_LOCK_ALLOC is on
  2017-02-01 21:13 [PATCH v2 0/3] locking/spinlock_debug: Change it to a mostly fair lock Waiman Long
  2017-02-01 21:13 ` [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter Waiman Long
  2017-02-01 21:13 ` [PATCH v2 2/3] locking/spinlock_debug: Reduce lock cacheline contention Waiman Long
@ 2017-02-01 21:13 ` Waiman Long
  2 siblings, 0 replies; 5+ messages in thread
From: Waiman Long @ 2017-02-01 21:13 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel, Waiman Long

The break_lock variable, defined when GENERIC_LOCKBREAK is on, is
used in kernel/locking/spinlock.c only when

defined(CONFIG_GENERIC_LOCKBREAK) && !defined(CONFIG_DEBUG_LOCK_ALLOC)

holds. As a result, there is no point in enabling GENERIC_LOCKBREAK
just to add one more variable to the spinlock structure that is never
going to be used when DEBUG_LOCK_ALLOC is also on. This patch disables
GENERIC_LOCKBREAK under that circumstance.
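
For context, the guard in kernel/locking/spinlock.c looks roughly like
the abridged sketch below (not the verbatim source): break_lock is only
ever touched inside the #else branch, which DEBUG_LOCK_ALLOC compiles
out.

  #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
  /* lock ops built without ever touching break_lock */
  #else
  #define BUILD_LOCK_OPS(op, locktype)                          \
  void __lockfunc __raw_##op##_lock(locktype##_t *lock)         \
  {                                                             \
          for (;;) {                                            \
                  preempt_disable();                            \
                  if (likely(do_raw_##op##_trylock(lock)))      \
                          break;                                \
                  preempt_enable();                             \
                                                                \
                  if (!(lock)->break_lock)                      \
                          (lock)->break_lock = 1;               \
                  while (!raw_##op##_can_lock(lock) &&          \
                         (lock)->break_lock)                    \
                          arch_##op##_relax(&lock->raw_lock);   \
          }                                                     \
          (lock)->break_lock = 0;                               \
  }
  #endif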

Signed-off-by: Waiman Long <longman@redhat.com>
---
 arch/m32r/Kconfig    | 2 +-
 arch/parisc/Kconfig  | 2 +-
 arch/powerpc/Kconfig | 2 +-
 arch/s390/Kconfig    | 2 +-
 arch/sh/Kconfig      | 2 +-
 arch/sparc/Kconfig   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index d227a69..c0922e0 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -242,7 +242,7 @@ config IRAM_SIZE
 config GENERIC_LOCKBREAK
 	bool
 	default y
-	depends on SMP && PREEMPT
+	depends on SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 3a71f38..bd8dbb9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -56,7 +56,7 @@ config STACK_GROWSUP
 config GENERIC_LOCKBREAK
 	bool
 	default y
-	depends on SMP && PREEMPT
+	depends on SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573..feae0a3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -60,7 +60,7 @@ config RWSEM_XCHGADD_ALGORITHM
 config GENERIC_LOCKBREAK
 	bool
 	default y
-	depends on SMP && PREEMPT
+	depends on SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config ARCH_HAS_ILOG2_U32
 	bool
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c6722112..a2a33a6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -38,7 +38,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
 config GENERIC_LOCKBREAK
-	def_bool y if SMP && PREEMPT
+	def_bool y if SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config PGSTE
 	def_bool y if KVM
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ee08695..e2eb35c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -105,7 +105,7 @@ config GENERIC_CALIBRATE_DELAY
 
 config GENERIC_LOCKBREAK
 	def_bool y
-	depends on SMP && PREEMPT
+	depends on SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config ARCH_SUSPEND_POSSIBLE
 	def_bool n
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cf4034c..695a31a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -286,7 +286,7 @@ config US3_MC
 config GENERIC_LOCKBREAK
 	bool
 	default y
-	depends on SPARC64 && SMP && PREEMPT
+	depends on SPARC64 && SMP && PREEMPT && !DEBUG_LOCK_ALLOC
 
 config NUMA
 	bool "NUMA support"
-- 
1.8.3.1

* Re: [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter
  2017-02-01 21:13 ` [PATCH v2 1/3] locking/spinlock_debug: Reduce lockup suspected message clutter Waiman Long
@ 2017-02-02  2:21   ` kbuild test robot
  0 siblings, 0 replies; 5+ messages in thread
From: kbuild test robot @ 2017-02-02  2:21 UTC (permalink / raw)
  To: Waiman Long
  Cc: kbuild-all, Peter Zijlstra, Ingo Molnar, linux-kernel, Waiman Long

Hi Waiman,

[auto build test ERROR on tip/locking/core]
[also build test ERROR on v4.10-rc6 next-20170201]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Waiman-Long/locking-spinlock_debug-Change-it-to-a-mostly-fair-lock/20170202-052215
config: arm-omap2plus_defconfig (attached as .config)
compiler: arm-linux-gnueabi-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=arm 

All errors (new ones prefixed by >>):

   kernel/built-in.o: In function `do_raw_spin_lock':
>> arch/arm/include/asm/cmpxchg.h:109: undefined reference to `__bad_xchg'
>> kernel/built-in.o:(.debug_addr+0x28c6c): undefined reference to `__bad_xchg'

vim +109 arch/arm/include/asm/cmpxchg.h

9f97da78 David Howells 2012-03-28   93  		asm volatile("@	__xchg1\n"
9f97da78 David Howells 2012-03-28   94  		"	swpb	%0, %1, [%2]"
9f97da78 David Howells 2012-03-28   95  			: "=&r" (ret)
9f97da78 David Howells 2012-03-28   96  			: "r" (x), "r" (ptr)
9f97da78 David Howells 2012-03-28   97  			: "memory", "cc");
9f97da78 David Howells 2012-03-28   98  		break;
9f97da78 David Howells 2012-03-28   99  	case 4:
9f97da78 David Howells 2012-03-28  100  		asm volatile("@	__xchg4\n"
9f97da78 David Howells 2012-03-28  101  		"	swp	%0, %1, [%2]"
9f97da78 David Howells 2012-03-28  102  			: "=&r" (ret)
9f97da78 David Howells 2012-03-28  103  			: "r" (x), "r" (ptr)
9f97da78 David Howells 2012-03-28  104  			: "memory", "cc");
9f97da78 David Howells 2012-03-28  105  		break;
9f97da78 David Howells 2012-03-28  106  #endif
9f97da78 David Howells 2012-03-28  107  	default:
31cd08c3 Russell King  2015-05-19  108  		/* Cause a link-time error, the xchg() size is not supported */
9f97da78 David Howells 2012-03-28 @109  		__bad_xchg(ptr, size), ret = 0;
9f97da78 David Howells 2012-03-28  110  		break;
9f97da78 David Howells 2012-03-28  111  	}
9f97da78 David Howells 2012-03-28  112  
9f97da78 David Howells 2012-03-28  113  	return ret;
9f97da78 David Howells 2012-03-28  114  }
9f97da78 David Howells 2012-03-28  115  
0ca326de Will Deacon   2015-08-06  116  #define xchg_relaxed(ptr, x) ({						\
e001bbae Russell King  2015-05-26  117  	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\

:::::: The code at line 109 was first introduced by commit
:::::: 9f97da78bf018206fb623cd351d454af2f105fe0 Disintegrate asm/system.h for ARM

:::::: TO: David Howells <dhowells@redhat.com>
:::::: CC: David Howells <dhowells@redhat.com>
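
The failure is a size-dispatch miss: xchg() dispatches on
sizeof(*ptr), and the __xchg() branch shown above only implements the
1-byte (swpb) and 4-byte (swp) cases. The new 16-bit lockup field
therefore falls into the default case, whose call to the deliberately
undefined __bad_xchg() turns the unsupported size into a link error:

  unsigned short lockup = 0;
  (void)xchg(&lockup, 1);   /* sizeof == 2: no case above, so this
                               links against __bad_xchg()          */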

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
