* [PATCH 0/4] locking/ww_mutex: Fix locktorture ww_mutex test problems
@ 2021-03-16 15:31 Waiman Long
  2021-03-16 15:31 ` [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling Waiman Long
                   ` (3 more replies)
  0 siblings, 4 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-16 15:31 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, Davidlohr Bueso
  Cc: linux-kernel, Juri Lelli, Waiman Long

It was found that a lockdep splat was produced whenever the ww_mutex
locktorture test was run on a kernel with lockdep enabled. It turns out
that there are bugs in both the ww_mutex and the locktorture code. This
patch series fixes these bugs so that the ww_mutex locktorture test can
run without producing unexpected lockdep splats.

Patches 1 & 2 are clean-up patches for ww_mutex. Patch 3 fixes the lockdep
bug in ww_mutex and patch 4 fixes a bug in the locktorture code.

Waiman Long (4):
  locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  locking/ww_mutex: Fix acquire/release imbalance in
    ww_acquire_init()/ww_acquire_fini()
  locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex
    test

 include/linux/ww_mutex.h     |  5 ++-
 kernel/locking/locktorture.c | 86 +++++++++++++++++++++++-------------
 kernel/locking/mutex.c       | 30 ++++++++-----
 3 files changed, 77 insertions(+), 44 deletions(-)

-- 
2.18.1



* [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-16 15:31 [PATCH 0/4] locking/ww_mutex: Fix locktorture ww_mutex test problems Waiman Long
@ 2021-03-16 15:31 ` Waiman Long
  2021-03-16 18:55   ` Davidlohr Bueso
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
  2021-03-16 15:31 ` [PATCH 2/4] locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini() Waiman Long
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-16 15:31 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, Davidlohr Bueso
  Cc: linux-kernel, Juri Lelli, Waiman Long

The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
combination is repetitive.

In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
clearing ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
ww_ctx) with just (ww_ctx).

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/mutex.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index adb935090768..622ebdfcd083 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
 		/*
@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
+	if (!use_ww_ctx)
+		ww_ctx = NULL;
+
 	might_sleep();
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif
 
 	ww = container_of(lock, struct ww_mutex, base);
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 
@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 	if (__mutex_trylock(lock) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * After waiting to acquire the wait_lock, try again.
 	 */
 	if (__mutex_trylock(lock)) {
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			__ww_mutex_check_waiters(lock, ww_ctx);
 
 		goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx) {
+		if (ww_ctx) {
 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * ww_mutex needs to always recheck its position since its waiter
 		 * list is not FIFO ordered.
 		 */
-		if ((use_ww_ctx && ww_ctx) || !first) {
+		if (ww_ctx || !first) {
 			first = __mutex_waiter_is_first(lock, &waiter);
 			if (first)
 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if (__mutex_trylock(lock) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 
 		spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
 	__set_current_state(TASK_RUNNING);
 
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		/*
 		 * Wound-Wait; we stole the lock (!first_waiter), check the
 		 * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx && ww_ctx)
+	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
 	spin_unlock(&lock->wait_lock);
-- 
2.18.1



* [PATCH 2/4] locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini()
  2021-03-16 15:31 [PATCH 0/4] locking/ww_mutex: Fix locktorture ww_mutex test problems Waiman Long
  2021-03-16 15:31 ` [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling Waiman Long
@ 2021-03-16 15:31 ` Waiman Long
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
  2021-03-16 15:31 ` [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock Waiman Long
  2021-03-16 15:31 ` [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test Waiman Long
  3 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-16 15:31 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, Davidlohr Bueso
  Cc: linux-kernel, Juri Lelli, Waiman Long

In ww_acquire_init(), mutex_acquire() is gated by CONFIG_DEBUG_LOCK_ALLOC.
The dep_map in the ww_acquire_ctx structure is also gated by the
same config. However, mutex_release() in ww_acquire_fini() is gated by
CONFIG_DEBUG_MUTEXES. It is possible to set CONFIG_DEBUG_MUTEXES without
setting CONFIG_DEBUG_LOCK_ALLOC, though it is an unlikely configuration.
That may cause a compilation error as dep_map isn't defined in this
case. Fix this potential problem by enclosing mutex_release() inside
CONFIG_DEBUG_LOCK_ALLOC.
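
For reference, a simplified sketch of the structure gating involved (the
real ww_acquire_ctx has more debug fields; only the lockdep part is shown):

	struct ww_acquire_ctx {
		struct task_struct *task;
		unsigned long stamp;
		unsigned int acquired;
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map dep_map;	/* absent without DEBUG_LOCK_ALLOC */
	#endif
	};

Any reference to ctx->dep_map therefore has to sit under the same
CONFIG_DEBUG_LOCK_ALLOC gate.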

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/linux/ww_mutex.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 850424e5d030..6ecf2a0220db 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
  */
 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	DEBUG_LOCKS_WARN_ON(ctx->acquired);
 	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
 		/*
-- 
2.18.1



* [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-16 15:31 [PATCH 0/4] locking/ww_mutex: Fix locktorture ww_mutex test problems Waiman Long
  2021-03-16 15:31 ` [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling Waiman Long
  2021-03-16 15:31 ` [PATCH 2/4] locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini() Waiman Long
@ 2021-03-16 15:31 ` Waiman Long
  2021-03-17  3:01   ` Davidlohr Bueso
                     ` (2 more replies)
  2021-03-16 15:31 ` [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test Waiman Long
  3 siblings, 3 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-16 15:31 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, Davidlohr Bueso
  Cc: linux-kernel, Juri Lelli, Waiman Long

It was found that running the ww_mutex_lock-torture test produced the
following lockdep splat almost immediately:

[  103.892638] ======================================================
[  103.892639] WARNING: possible circular locking dependency detected
[  103.892641] 5.12.0-rc3-debug+ #2 Tainted: G S      W
[  103.892643] ------------------------------------------------------
[  103.892643] lock_torture_wr/3234 is trying to acquire lock:
[  103.892646] ffffffffc0b35b10 (torture_ww_mutex_2.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892660]
[  103.892660] but task is already holding lock:
[  103.892661] ffffffffc0b35cd0 (torture_ww_mutex_0.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x3e2/0x720 [locktorture]
[  103.892669]
[  103.892669] which lock already depends on the new lock.
[  103.892669]
[  103.892670]
[  103.892670] the existing dependency chain (in reverse order) is:
[  103.892671]
[  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
[  103.892675]        lock_acquire+0x1c5/0x830
[  103.892682]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892687]        ww_mutex_lock+0x4b/0x180
[  103.892690]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892694]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892698]        kthread+0x35f/0x430
[  103.892701]        ret_from_fork+0x1f/0x30
[  103.892706]
[  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
[  103.892709]        lock_acquire+0x1c5/0x830
[  103.892712]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892715]        ww_mutex_lock+0x4b/0x180
[  103.892717]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892721]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892725]        kthread+0x35f/0x430
[  103.892727]        ret_from_fork+0x1f/0x30
[  103.892730]
[  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:
[  103.892733]        check_prevs_add+0x3fd/0x2470
[  103.892736]        __lock_acquire+0x2602/0x3100
[  103.892738]        lock_acquire+0x1c5/0x830
[  103.892740]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892743]        ww_mutex_lock+0x4b/0x180
[  103.892746]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892749]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892753]        kthread+0x35f/0x430
[  103.892755]        ret_from_fork+0x1f/0x30
[  103.892757]
[  103.892757] other info that might help us debug this:
[  103.892757]
[  103.892758] Chain exists of:
[  103.892758]   torture_ww_mutex_2.base --> torture_ww_mutex_1.base --> torture_ww_mutex_0.base
[  103.892758]
[  103.892763]  Possible unsafe locking scenario:
[  103.892763]
[  103.892764]        CPU0                    CPU1
[  103.892765]        ----                    ----
[  103.892765]   lock(torture_ww_mutex_0.base);
[  103.892767] 				      lock(torture_ww_mutex_1.base);
[  103.892770] 				      lock(torture_ww_mutex_0.base);
[  103.892772]   lock(torture_ww_mutex_2.base);
[  103.892774]
[  103.892774]  *** DEADLOCK ***

Since ww_mutex is supposed to be deadlock-proof if used properly, such
a deadlock scenario should not happen. To avoid this false-positive
splat, treat ww_mutex_lock() like a trylock.

After applying this patch, the locktorture test can run for a long time
without triggering the circular locking dependency splat.
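
For background, the wait/wound protocol that makes properly used
ww_mutexes deadlock-proof looks roughly like this (a minimal sketch with
illustrative names; a real user keeps retrying until all locks are held):

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &my_ww_class);

	err = ww_mutex_lock(&a->lock, &ctx);	/* first lock: no backoff needed yet */
	err = ww_mutex_lock(&b->lock, &ctx);
	if (err == -EDEADLK) {
		/* we lost: back off, then sleep on the contended lock */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		err = ww_mutex_lock(&a->lock, &ctx);	/* and retry */
	}

	/* ... critical section ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);

Lockdep only sees nested mutex acquisitions taken in varying order, so
it reports a circular dependency even though the -EDEADLK backoff
guarantees forward progress.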

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/mutex.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 622ebdfcd083..bb89393cd3a2 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -946,7 +946,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
 	preempt_disable();
-	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+	/*
+	 * Treat as trylock for ww_mutex.
+	 */
+	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);
 
 	if (__mutex_trylock(lock) ||
 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
-- 
2.18.1



* [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test
  2021-03-16 15:31 [PATCH 0/4] locking/ww_mutex: Fix locktorture ww_mutex test problems Waiman Long
                   ` (2 preceding siblings ...)
  2021-03-16 15:31 ` [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock Waiman Long
@ 2021-03-16 15:31 ` Waiman Long
  2021-03-17  5:16   ` Davidlohr Bueso
  3 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-16 15:31 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, Davidlohr Bueso
  Cc: linux-kernel, Juri Lelli, Waiman Long

The ww_acquire_ctx structure for ww_mutex needs to persist for a complete
lock/unlock cycle. In the ww_mutex test in locktorture, however, both
ww_acquire_init() and ww_acquire_fini() are called within the lock
function only. This causes a lockdep splat of "WARNING: Nested lock
was not taken" when lockdep is enabled in the kernel.

To fix this problem, we need to move the ww_acquire_fini() after the
ww_mutex_unlock() in torture_ww_mutex_unlock(). In other words, we need
to pass state information from the lock function to the unlock function.
Change the writelock and writeunlock function prototypes to allow that
and change the torture_ww_mutex_lock() and torture_ww_mutex_unlock()
accordingly.
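
For reference, the lifetime the ww_mutex API expects is roughly (sketch
with illustrative names):

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &class);
	ww_mutex_lock(&m, &ctx);	/* ... possibly more locks ... */
	/* ... critical section ... */
	ww_mutex_unlock(&m);		/* release all held ww_mutexes ... */
	ww_acquire_fini(&ctx);		/* ... and only then fini the ctx */

Since locktorture's lock and unlock methods are separate callbacks, the
context has to be handed from one to the other, hence the new void **
out-parameter on writelock and the void * parameter on writeunlock.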

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/locktorture.c | 86 +++++++++++++++++++++++-------------
 1 file changed, 56 insertions(+), 30 deletions(-)

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0ab94e1f1276..782925dcf5d9 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -76,10 +76,10 @@ static void lock_torture_cleanup(void);
 struct lock_torture_ops {
 	void (*init)(void);
 	void (*exit)(void);
-	int (*writelock)(void);
+	int (*writelock)(void **parg);
 	void (*write_delay)(struct torture_random_state *trsp);
 	void (*task_boost)(struct torture_random_state *trsp);
-	void (*writeunlock)(void);
+	void (*writeunlock)(void *arg);
 	int (*readlock)(void);
 	void (*read_delay)(struct torture_random_state *trsp);
 	void (*readunlock)(void);
@@ -105,7 +105,7 @@ static struct lock_torture_cxt cxt = { 0, 0, false, false,
  * Definitions for lock torture testing.
  */
 
-static int torture_lock_busted_write_lock(void)
+static int torture_lock_busted_write_lock(void **parg __maybe_unused)
 {
 	return 0;  /* BUGGY, do not use in real life!!! */
 }
@@ -122,7 +122,7 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
 		torture_preempt_schedule();  /* Allow test to be preempted. */
 }
 
-static void torture_lock_busted_write_unlock(void)
+static void torture_lock_busted_write_unlock(void *arg __maybe_unused)
 {
 	  /* BUGGY, do not use in real life!!! */
 }
@@ -145,7 +145,8 @@ static struct lock_torture_ops lock_busted_ops = {
 
 static DEFINE_SPINLOCK(torture_spinlock);
 
-static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
+static int torture_spin_lock_write_lock(void **parg __maybe_unused)
+__acquires(torture_spinlock)
 {
 	spin_lock(&torture_spinlock);
 	return 0;
@@ -169,7 +170,8 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
 		torture_preempt_schedule();  /* Allow test to be preempted. */
 }
 
-static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
+static void torture_spin_lock_write_unlock(void *arg __maybe_unused)
+__releases(torture_spinlock)
 {
 	spin_unlock(&torture_spinlock);
 }
@@ -185,7 +187,7 @@ static struct lock_torture_ops spin_lock_ops = {
 	.name		= "spin_lock"
 };
 
-static int torture_spin_lock_write_lock_irq(void)
+static int torture_spin_lock_write_lock_irq(void **parg __maybe_unused)
 __acquires(torture_spinlock)
 {
 	unsigned long flags;
@@ -195,7 +197,7 @@ __acquires(torture_spinlock)
 	return 0;
 }
 
-static void torture_lock_spin_write_unlock_irq(void)
+static void torture_lock_spin_write_unlock_irq(void *arg __maybe_unused)
 __releases(torture_spinlock)
 {
 	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
@@ -214,7 +216,8 @@ static struct lock_torture_ops spin_lock_irq_ops = {
 
 static DEFINE_RWLOCK(torture_rwlock);
 
-static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+static int torture_rwlock_write_lock(void **parg __maybe_unused)
+__acquires(torture_rwlock)
 {
 	write_lock(&torture_rwlock);
 	return 0;
@@ -235,7 +238,8 @@ static void torture_rwlock_write_delay(struct torture_random_state *trsp)
 		udelay(shortdelay_us);
 }
 
-static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+static void torture_rwlock_write_unlock(void *arg __maybe_unused)
+__releases(torture_rwlock)
 {
 	write_unlock(&torture_rwlock);
 }
@@ -277,7 +281,8 @@ static struct lock_torture_ops rw_lock_ops = {
 	.name		= "rw_lock"
 };
 
-static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+static int torture_rwlock_write_lock_irq(void **parg __maybe_unused)
+__acquires(torture_rwlock)
 {
 	unsigned long flags;
 
@@ -286,7 +291,7 @@ static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
 	return 0;
 }
 
-static void torture_rwlock_write_unlock_irq(void)
+static void torture_rwlock_write_unlock_irq(void *arg __maybe_unused)
 __releases(torture_rwlock)
 {
 	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
@@ -320,7 +325,8 @@ static struct lock_torture_ops rw_lock_irq_ops = {
 
 static DEFINE_MUTEX(torture_mutex);
 
-static int torture_mutex_lock(void) __acquires(torture_mutex)
+static int torture_mutex_lock(void **parg __maybe_unused)
+__acquires(torture_mutex)
 {
 	mutex_lock(&torture_mutex);
 	return 0;
@@ -340,7 +346,8 @@ static void torture_mutex_delay(struct torture_random_state *trsp)
 		torture_preempt_schedule();  /* Allow test to be preempted. */
 }
 
-static void torture_mutex_unlock(void) __releases(torture_mutex)
+static void torture_mutex_unlock(void *arg __maybe_unused)
+__releases(torture_mutex)
 {
 	mutex_unlock(&torture_mutex);
 }
@@ -362,7 +369,7 @@ static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
 
-static int torture_ww_mutex_lock(void)
+static int torture_ww_mutex_lock(void **parg)
 __acquires(torture_ww_mutex_0)
 __acquires(torture_ww_mutex_1)
 __acquires(torture_ww_mutex_2)
@@ -372,7 +379,12 @@ __acquires(torture_ww_mutex_2)
 		struct list_head link;
 		struct ww_mutex *lock;
 	} locks[3], *ll, *ln;
-	struct ww_acquire_ctx ctx;
+	struct ww_acquire_ctx *ctx;
+
+	ctx = kmalloc(sizeof(struct ww_acquire_ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	*parg = ctx;
 
 	locks[0].lock = &torture_ww_mutex_0;
 	list_add(&locks[0].link, &list);
@@ -383,12 +395,12 @@ __acquires(torture_ww_mutex_2)
 	locks[2].lock = &torture_ww_mutex_2;
 	list_add(&locks[2].link, &list);
 
-	ww_acquire_init(&ctx, &torture_ww_class);
+	ww_acquire_init(ctx, &torture_ww_class);
 
 	list_for_each_entry(ll, &list, link) {
 		int err;
 
-		err = ww_mutex_lock(ll->lock, &ctx);
+		err = ww_mutex_lock(ll->lock, ctx);
 		if (!err)
 			continue;
 
@@ -399,22 +411,28 @@ __acquires(torture_ww_mutex_2)
 		if (err != -EDEADLK)
 			return err;
 
-		ww_mutex_lock_slow(ll->lock, &ctx);
+		ww_mutex_lock_slow(ll->lock, ctx);
 		list_move(&ll->link, &list);
 	}
 
-	ww_acquire_fini(&ctx);
 	return 0;
 }
 
-static void torture_ww_mutex_unlock(void)
+static void torture_ww_mutex_unlock(void *arg)
 __releases(torture_ww_mutex_0)
 __releases(torture_ww_mutex_1)
 __releases(torture_ww_mutex_2)
 {
+	struct ww_acquire_ctx *ctx = arg;
+
+	if (!ctx)
+		return;
+
 	ww_mutex_unlock(&torture_ww_mutex_0);
 	ww_mutex_unlock(&torture_ww_mutex_1);
 	ww_mutex_unlock(&torture_ww_mutex_2);
+	ww_acquire_fini(ctx);
+	kfree(ctx);
 }
 
 static struct lock_torture_ops ww_mutex_lock_ops = {
@@ -431,7 +449,8 @@ static struct lock_torture_ops ww_mutex_lock_ops = {
 #ifdef CONFIG_RT_MUTEXES
 static DEFINE_RT_MUTEX(torture_rtmutex);
 
-static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
+static int torture_rtmutex_lock(void **parg __maybe_unused)
+__acquires(torture_rtmutex)
 {
 	rt_mutex_lock(&torture_rtmutex);
 	return 0;
@@ -487,7 +506,8 @@ static void torture_rtmutex_delay(struct torture_random_state *trsp)
 		torture_preempt_schedule();  /* Allow test to be preempted. */
 }
 
-static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
+static void torture_rtmutex_unlock(void *arg __maybe_unused)
+__releases(torture_rtmutex)
 {
 	rt_mutex_unlock(&torture_rtmutex);
 }
@@ -505,7 +525,8 @@ static struct lock_torture_ops rtmutex_lock_ops = {
 #endif
 
 static DECLARE_RWSEM(torture_rwsem);
-static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
+static int torture_rwsem_down_write(void **parg __maybe_unused)
+__acquires(torture_rwsem)
 {
 	down_write(&torture_rwsem);
 	return 0;
@@ -525,7 +546,8 @@ static void torture_rwsem_write_delay(struct torture_random_state *trsp)
 		torture_preempt_schedule();  /* Allow test to be preempted. */
 }
 
-static void torture_rwsem_up_write(void) __releases(torture_rwsem)
+static void torture_rwsem_up_write(void *arg __maybe_unused)
+__releases(torture_rwsem)
 {
 	up_write(&torture_rwsem);
 }
@@ -579,13 +601,15 @@ static void torture_percpu_rwsem_exit(void)
 	percpu_free_rwsem(&pcpu_rwsem);
 }
 
-static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
+static int torture_percpu_rwsem_down_write(void **parg __maybe_unused)
+__acquires(pcpu_rwsem)
 {
 	percpu_down_write(&pcpu_rwsem);
 	return 0;
 }
 
-static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
+static void torture_percpu_rwsem_up_write(void *arg __maybe_unused)
+__releases(pcpu_rwsem)
 {
 	percpu_up_write(&pcpu_rwsem);
 }
@@ -618,7 +642,7 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = {
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
  */
-static int lock_torture_writer(void *arg)
+static int lock_torture_writer(void *arg __maybe_unused)
 {
 	struct lock_stress_stats *lwsp = arg;
 	DEFINE_TORTURE_RANDOM(rand);
@@ -627,11 +651,13 @@ static int lock_torture_writer(void *arg)
 	set_user_nice(current, MAX_NICE);
 
 	do {
+		void *arg = NULL;
+
 		if ((torture_random(&rand) & 0xfffff) == 0)
 			schedule_timeout_uninterruptible(1);
 
 		cxt.cur_ops->task_boost(&rand);
-		cxt.cur_ops->writelock();
+		cxt.cur_ops->writelock(&arg);
 		if (WARN_ON_ONCE(lock_is_write_held))
 			lwsp->n_lock_fail++;
 		lock_is_write_held = true;
@@ -642,7 +668,7 @@ static int lock_torture_writer(void *arg)
 		cxt.cur_ops->write_delay(&rand);
 		lock_is_write_held = false;
 		WRITE_ONCE(last_lock_release, jiffies);
-		cxt.cur_ops->writeunlock();
+		cxt.cur_ops->writeunlock(arg);
 
 		stutter_wait("lock_torture_writer");
 	} while (!torture_must_stop());
-- 
2.18.1



* Re: [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-16 15:31 ` [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling Waiman Long
@ 2021-03-16 18:55   ` Davidlohr Bueso
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
  1 sibling, 0 replies; 32+ messages in thread
From: Davidlohr Bueso @ 2021-03-16 18:55 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, linux-kernel, Juri Lelli

On Tue, 16 Mar 2021, Waiman Long wrote:

>The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
>function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
>combination is repetitive.

I always found that very fugly.

>
>In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
>ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
>clearing ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
>ww_ctx) with just (ww_ctx).
>
>Signed-off-by: Waiman Long <longman@redhat.com>

Acked-by: Davidlohr Bueso <dbueso@suse.de>

>---
> kernel/locking/mutex.c | 25 ++++++++++++++-----------
> 1 file changed, 14 insertions(+), 11 deletions(-)
>
>diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
>index adb935090768..622ebdfcd083 100644
>--- a/kernel/locking/mutex.c
>+++ b/kernel/locking/mutex.c
>@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
>  */
> static __always_inline bool
> mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
>-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
>+		      struct mutex_waiter *waiter)
> {
> 	if (!waiter) {
> 		/*
>@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
> #else
> static __always_inline bool
> mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
>-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
>+		      struct mutex_waiter *waiter)
> {
> 	return false;
> }
>@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 	struct ww_mutex *ww;
> 	int ret;
>
>+	if (!use_ww_ctx)
>+		ww_ctx = NULL;
>+
> 	might_sleep();
>
> #ifdef CONFIG_DEBUG_MUTEXES
>@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> #endif
>
> 	ww = container_of(lock, struct ww_mutex, base);
>-	if (use_ww_ctx && ww_ctx) {
>+	if (ww_ctx) {
> 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
> 			return -EALREADY;
>
>@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
>
> 	if (__mutex_trylock(lock) ||
>-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
>+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
> 		/* got the lock, yay! */
> 		lock_acquired(&lock->dep_map, ip);
>-		if (use_ww_ctx && ww_ctx)
>+		if (ww_ctx)
> 			ww_mutex_set_context_fastpath(ww, ww_ctx);
> 		preempt_enable();
> 		return 0;
>@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 	 * After waiting to acquire the wait_lock, try again.
> 	 */
> 	if (__mutex_trylock(lock)) {
>-		if (use_ww_ctx && ww_ctx)
>+		if (ww_ctx)
> 			__ww_mutex_check_waiters(lock, ww_ctx);
>
> 		goto skip_wait;
>@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 			goto err;
> 		}
>
>-		if (use_ww_ctx && ww_ctx) {
>+		if (ww_ctx) {
> 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
> 			if (ret)
> 				goto err;
>@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 		 * ww_mutex needs to always recheck its position since its waiter
> 		 * list is not FIFO ordered.
> 		 */
>-		if ((use_ww_ctx && ww_ctx) || !first) {
>+		if (ww_ctx || !first) {
> 			first = __mutex_waiter_is_first(lock, &waiter);
> 			if (first)
> 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
>@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 		 * or we must see its unlock and acquire.
> 		 */
> 		if (__mutex_trylock(lock) ||
>-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
>+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
> 			break;
>
> 		spin_lock(&lock->wait_lock);
>@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> acquired:
> 	__set_current_state(TASK_RUNNING);
>
>-	if (use_ww_ctx && ww_ctx) {
>+	if (ww_ctx) {
> 		/*
> 		 * Wound-Wait; we stole the lock (!first_waiter), check the
> 		 * waiters as anyone might want to wound us.
>@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> 	/* got the lock - cleanup and rejoice! */
> 	lock_acquired(&lock->dep_map, ip);
>
>-	if (use_ww_ctx && ww_ctx)
>+	if (ww_ctx)
> 		ww_mutex_lock_acquired(ww, ww_ctx);
>
> 	spin_unlock(&lock->wait_lock);
>-- 
>2.18.1
>


* Re: [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-16 15:31 ` [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock Waiman Long
@ 2021-03-17  3:01   ` Davidlohr Bueso
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
  2021-03-18  2:24   ` [PATCH 3/4] " Boqun Feng
  2 siblings, 0 replies; 32+ messages in thread
From: Davidlohr Bueso @ 2021-03-17  3:01 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, linux-kernel, Juri Lelli

On Tue, 16 Mar 2021, Waiman Long wrote:

>It was found that running the ww_mutex_lock-torture test produced the
>following lockdep splat almost immediately:
>
>[  103.892638] ======================================================
>[  103.892639] WARNING: possible circular locking dependency detected
>[  103.892641] 5.12.0-rc3-debug+ #2 Tainted: G S      W
>[  103.892643] ------------------------------------------------------
>[  103.892643] lock_torture_wr/3234 is trying to acquire lock:
>[  103.892646] ffffffffc0b35b10 (torture_ww_mutex_2.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x316/0x720 [locktorture]
>[  103.892660]
>[  103.892660] but task is already holding lock:
>[  103.892661] ffffffffc0b35cd0 (torture_ww_mutex_0.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x3e2/0x720 [locktorture]
>[  103.892669]
>[  103.892669] which lock already depends on the new lock.
>[  103.892669]
>[  103.892670]
>[  103.892670] the existing dependency chain (in reverse order) is:
>[  103.892671]
>[  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
>[  103.892675]        lock_acquire+0x1c5/0x830
>[  103.892682]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
>[  103.892687]        ww_mutex_lock+0x4b/0x180
>[  103.892690]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
>[  103.892694]        lock_torture_writer+0x142/0x3a0 [locktorture]
>[  103.892698]        kthread+0x35f/0x430
>[  103.892701]        ret_from_fork+0x1f/0x30
>[  103.892706]
>[  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
>[  103.892709]        lock_acquire+0x1c5/0x830
>[  103.892712]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
>[  103.892715]        ww_mutex_lock+0x4b/0x180
>[  103.892717]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
>[  103.892721]        lock_torture_writer+0x142/0x3a0 [locktorture]
>[  103.892725]        kthread+0x35f/0x430
>[  103.892727]        ret_from_fork+0x1f/0x30
>[  103.892730]
>[  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:
>[  103.892733]        check_prevs_add+0x3fd/0x2470
>[  103.892736]        __lock_acquire+0x2602/0x3100
>[  103.892738]        lock_acquire+0x1c5/0x830
>[  103.892740]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
>[  103.892743]        ww_mutex_lock+0x4b/0x180
>[  103.892746]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
>[  103.892749]        lock_torture_writer+0x142/0x3a0 [locktorture]
>[  103.892753]        kthread+0x35f/0x430
>[  103.892755]        ret_from_fork+0x1f/0x30
>[  103.892757]
>[  103.892757] other info that might help us debug this:
>[  103.892757]
>[  103.892758] Chain exists of:
>[  103.892758]   torture_ww_mutex_2.base --> torture_ww_mutex_1.base --> torture_ww_mutex_0.base
>[  103.892758]
>[  103.892763]  Possible unsafe locking scenario:
>[  103.892763]
>[  103.892764]        CPU0                    CPU1
>[  103.892765]        ----                    ----
>[  103.892765]   lock(torture_ww_mutex_0.base);
>[  103.892767] 				      lock(torture_ww_mutex_1.base);
>[  103.892770] 				      lock(torture_ww_mutex_0.base);
>[  103.892772]   lock(torture_ww_mutex_2.base);
>[  103.892774]
>[  103.892774]  *** DEADLOCK ***
>
>Since ww_mutex is supposed to be deadlock-proof if used properly, such
>a deadlock scenario should not happen. To avoid this false-positive
>splat, treat ww_mutex_lock() like a trylock.
>
>After applying this patch, the locktorture test can run for a long time
>without triggering the circular locking dependency splat.
>
>Signed-off-by: Waiman Long <longman@redhat.com>

Acked-by: Davidlohr Bueso <dbueso@suse.de>


* Re: [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test
  2021-03-16 15:31 ` [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test Waiman Long
@ 2021-03-17  5:16   ` Davidlohr Bueso
  2021-03-17 13:21     ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Davidlohr Bueso @ 2021-03-17  5:16 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, linux-kernel, Juri Lelli

On Tue, 16 Mar 2021, Waiman Long wrote:

>The ww_acquire_ctx structure for ww_mutex needs to persist for a complete
>lock/unlock cycle. In the ww_mutex test in locktorture, however, both
>ww_acquire_init() and ww_acquire_fini() are called within the lock
>function only. This causes a lockdep splat of "WARNING: Nested lock
>was not taken" when lockdep is enabled in the kernel.
>
>To fix this problem, we need to move the ww_acquire_fini() after the
>ww_mutex_unlock() in torture_ww_mutex_unlock(). In other words, we need
>to pass state information from the lock function to the unlock function.

Right, and afaict this _is_ the way ww_acquire_fini() should be called:

  * Releases a w/w acquire context. This must be called _after_ all acquired w/w
  * mutexes have been released with ww_mutex_unlock.

>Change the writelock and writeunlock function prototypes to allow that
>and change the torture_ww_mutex_lock() and torture_ww_mutex_unlock()
>accordingly.

But wouldn't just making ctx a global variable be enough instead? That way
we don't deal with memory allocation for every lock/unlock operation (yuck).
Plus the ENOMEM would need to be handled/propagated accordingly - the code
really doesn't expect any failure from ->writelock().

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0ab94e1f1276..606c0f6c1657 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -362,6 +362,8 @@ static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
  static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
  static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

+static struct ww_acquire_ctx ctx;
+
  static int torture_ww_mutex_lock(void)
  __acquires(torture_ww_mutex_0)
  __acquires(torture_ww_mutex_1)
@@ -372,7 +374,6 @@ __acquires(torture_ww_mutex_2)
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
-	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);
@@ -403,7 +404,6 @@ __acquires(torture_ww_mutex_2)
		list_move(&ll->link, &list);
	}

-	ww_acquire_fini(&ctx);
	return 0;
  }

@@ -415,6 +415,8 @@ __releases(torture_ww_mutex_2)
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
+
+	ww_acquire_fini(&ctx);
  }

  static struct lock_torture_ops ww_mutex_lock_ops = {


* [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-16 15:31 ` [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock Waiman Long
  2021-03-17  3:01   ` Davidlohr Bueso
@ 2021-03-17 12:38   ` tip-bot2 for Waiman Long
  2021-03-17 13:12     ` Peter Zijlstra
  2021-03-18  2:24   ` [PATCH 3/4] " Boqun Feng
  2 siblings, 1 reply; 32+ messages in thread
From: tip-bot2 for Waiman Long @ 2021-03-17 12:38 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Waiman Long, Ingo Molnar, x86, linux-kernel

The following commit has been merged into the locking/urgent branch of tip:

Commit-ID:     b058f2e4d0a70c060e21ed122b264e9649cad57f
Gitweb:        https://git.kernel.org/tip/b058f2e4d0a70c060e21ed122b264e9649cad57f
Author:        Waiman Long <longman@redhat.com>
AuthorDate:    Tue, 16 Mar 2021 11:31:18 -04:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 17 Mar 2021 09:56:46 +01:00

locking/ww_mutex: Treat ww_mutex_lock() like a trylock

It was found that running the ww_mutex_lock-torture test produced the
following lockdep splat almost immediately:

[  103.892638] ======================================================
[  103.892639] WARNING: possible circular locking dependency detected
[  103.892641] 5.12.0-rc3-debug+ #2 Tainted: G S      W
[  103.892643] ------------------------------------------------------
[  103.892643] lock_torture_wr/3234 is trying to acquire lock:
[  103.892646] ffffffffc0b35b10 (torture_ww_mutex_2.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892660]
[  103.892660] but task is already holding lock:
[  103.892661] ffffffffc0b35cd0 (torture_ww_mutex_0.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x3e2/0x720 [locktorture]
[  103.892669]
[  103.892669] which lock already depends on the new lock.
[  103.892669]
[  103.892670]
[  103.892670] the existing dependency chain (in reverse order) is:
[  103.892671]
[  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
[  103.892675]        lock_acquire+0x1c5/0x830
[  103.892682]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892687]        ww_mutex_lock+0x4b/0x180
[  103.892690]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892694]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892698]        kthread+0x35f/0x430
[  103.892701]        ret_from_fork+0x1f/0x30
[  103.892706]
[  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
[  103.892709]        lock_acquire+0x1c5/0x830
[  103.892712]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892715]        ww_mutex_lock+0x4b/0x180
[  103.892717]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892721]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892725]        kthread+0x35f/0x430
[  103.892727]        ret_from_fork+0x1f/0x30
[  103.892730]
[  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:
[  103.892733]        check_prevs_add+0x3fd/0x2470
[  103.892736]        __lock_acquire+0x2602/0x3100
[  103.892738]        lock_acquire+0x1c5/0x830
[  103.892740]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
[  103.892743]        ww_mutex_lock+0x4b/0x180
[  103.892746]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
[  103.892749]        lock_torture_writer+0x142/0x3a0 [locktorture]
[  103.892753]        kthread+0x35f/0x430
[  103.892755]        ret_from_fork+0x1f/0x30
[  103.892757]
[  103.892757] other info that might help us debug this:
[  103.892757]
[  103.892758] Chain exists of:
[  103.892758]   torture_ww_mutex_2.base --> torture_ww_mutex_1.base --> torture_ww_mutex_0.base
[  103.892758]
[  103.892763]  Possible unsafe locking scenario:
[  103.892763]
[  103.892764]        CPU0                    CPU1
[  103.892765]        ----                    ----
[  103.892765]   lock(torture_ww_mutex_0.base);
[  103.892767] 				      lock(torture_ww_mutex_1.base);
[  103.892770] 				      lock(torture_ww_mutex_0.base);
[  103.892772]   lock(torture_ww_mutex_2.base);
[  103.892774]
[  103.892774]  *** DEADLOCK ***

Since ww_mutex is supposed to be deadlock-proof if used properly, such
a deadlock scenario should not happen. To avoid this false-positive
splat, treat ww_mutex_lock() like a trylock.

After applying this patch, the locktorture test can run for a long time
without triggering the circular locking dependency splat.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lore.kernel.org/r/20210316153119.13802-4-longman@redhat.com
---
 kernel/locking/mutex.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 622ebdf..bb89393 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -946,7 +946,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
 	preempt_disable();
-	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+	/*
+	 * Treat as trylock for ww_mutex.
+	 */
+	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);
 
 	if (__mutex_trylock(lock) ||
 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {


* [tip: locking/urgent] locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini()
  2021-03-16 15:31 ` [PATCH 2/4] locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini() Waiman Long
@ 2021-03-17 12:38   ` tip-bot2 for Waiman Long
  0 siblings, 0 replies; 32+ messages in thread
From: tip-bot2 for Waiman Long @ 2021-03-17 12:38 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Waiman Long, Ingo Molnar, x86, linux-kernel

The following commit has been merged into the locking/urgent branch of tip:

Commit-ID:     bee645788e07eea63055d261d2884ea45c2ba857
Gitweb:        https://git.kernel.org/tip/bee645788e07eea63055d261d2884ea45c2ba857
Author:        Waiman Long <longman@redhat.com>
AuthorDate:    Tue, 16 Mar 2021 11:31:17 -04:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 17 Mar 2021 09:56:45 +01:00

locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini()

In ww_acquire_init(), mutex_acquire() is gated by CONFIG_DEBUG_LOCK_ALLOC.
The dep_map in the ww_acquire_ctx structure is also gated by the
same config. However, mutex_release() in ww_acquire_fini() is gated by
CONFIG_DEBUG_MUTEXES. It is possible to set CONFIG_DEBUG_MUTEXES without
setting CONFIG_DEBUG_LOCK_ALLOC, though it is an unlikely configuration.
That may cause a compilation error as dep_map isn't defined in this
case. Fix this potential problem by enclosing mutex_release() inside
CONFIG_DEBUG_LOCK_ALLOC.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210316153119.13802-3-longman@redhat.com
---
 include/linux/ww_mutex.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 850424e..6ecf2a0 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
  */
 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	DEBUG_LOCKS_WARN_ON(ctx->acquired);
 	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
 		/*


* [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-16 15:31 ` [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling Waiman Long
  2021-03-16 18:55   ` Davidlohr Bueso
@ 2021-03-17 12:38   ` tip-bot2 for Waiman Long
  2021-03-17 12:59     ` Peter Zijlstra
  1 sibling, 1 reply; 32+ messages in thread
From: tip-bot2 for Waiman Long @ 2021-03-17 12:38 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Waiman Long, Ingo Molnar, Davidlohr Bueso, x86, linux-kernel

The following commit has been merged into the locking/urgent branch of tip:

Commit-ID:     5de2055d31ea88fd9ae9709ac95c372a505a60fa
Gitweb:        https://git.kernel.org/tip/5de2055d31ea88fd9ae9709ac95c372a505a60fa
Author:        Waiman Long <longman@redhat.com>
AuthorDate:    Tue, 16 Mar 2021 11:31:16 -04:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 17 Mar 2021 09:56:44 +01:00

locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling

The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
combination is repetitive.

In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
clearing ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
ww_ctx) with just (ww_ctx).

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lore.kernel.org/r/20210316153119.13802-2-longman@redhat.com
---
 kernel/locking/mutex.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index adb9350..622ebdf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
 		/*
@@ -702,7 +702,7 @@ fail:
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
+	if (!use_ww_ctx)
+		ww_ctx = NULL;
+
 	might_sleep();
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif
 
 	ww = container_of(lock, struct ww_mutex, base);
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 
@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 	if (__mutex_trylock(lock) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * After waiting to acquire the wait_lock, try again.
 	 */
 	if (__mutex_trylock(lock)) {
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			__ww_mutex_check_waiters(lock, ww_ctx);
 
 		goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx) {
+		if (ww_ctx) {
 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * ww_mutex needs to always recheck its position since its waiter
 		 * list is not FIFO ordered.
 		 */
-		if ((use_ww_ctx && ww_ctx) || !first) {
+		if (ww_ctx || !first) {
 			first = __mutex_waiter_is_first(lock, &waiter);
 			if (first)
 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if (__mutex_trylock(lock) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 
 		spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
 	__set_current_state(TASK_RUNNING);
 
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		/*
 		 * Wound-Wait; we stole the lock (!first_waiter), check the
 		 * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx && ww_ctx)
+	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
 	spin_unlock(&lock->wait_lock);


* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
@ 2021-03-17 12:59     ` Peter Zijlstra
  2021-03-17 13:43       ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 12:59 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-tip-commits, Waiman Long, Ingo Molnar, Davidlohr Bueso, x86

On Wed, Mar 17, 2021 at 12:38:22PM -0000, tip-bot2 for Waiman Long wrote:
> The following commit has been merged into the locking/urgent branch of tip:
> 
> Commit-ID:     5de2055d31ea88fd9ae9709ac95c372a505a60fa
> Gitweb:        https://git.kernel.org/tip/5de2055d31ea88fd9ae9709ac95c372a505a60fa
> Author:        Waiman Long <longman@redhat.com>
> AuthorDate:    Tue, 16 Mar 2021 11:31:16 -04:00
> Committer:     Ingo Molnar <mingo@kernel.org>
> CommitterDate: Wed, 17 Mar 2021 09:56:44 +01:00
> 
> locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
> 
> The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
> function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
> combination is repetitive.
> 
> In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
> ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
> clearing ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
> ww_ctx) with just (ww_ctx).

The reason this code was like this is that GCC could constant-propagate
use_ww_ctx but could not do the same for ww_ctx (since that's
external).

Please double check generated code to make sure you've not introduced a
bunch of extra branches.
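
For reference, what that constant propagation relied on, roughly:
__mutex_lock_common() is __always_inline and the plain-mutex call sites
pass a literal false, e.g.:

	static int __sched
	__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		     struct lockdep_map *nest_lock, unsigned long ip)
	{
		/* use_ww_ctx == false folds away all ww-only branches */
		return __mutex_lock_common(lock, state, subclass, nest_lock,
					   ip, NULL, false);
	}

With the flag gone, the same folding now depends on GCC propagating the
constant NULL ww_ctx argument instead.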


* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
@ 2021-03-17 13:12     ` Peter Zijlstra
  2021-03-17 13:31       ` Peter Zijlstra
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 13:12 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-tip-commits, Waiman Long, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 12:38:21PM -0000, tip-bot2 for Waiman Long wrote:
> The following commit has been merged into the locking/urgent branch of tip:
> 
> Commit-ID:     b058f2e4d0a70c060e21ed122b264e9649cad57f
> Gitweb:        https://git.kernel.org/tip/b058f2e4d0a70c060e21ed122b264e9649cad57f
> Author:        Waiman Long <longman@redhat.com>
> AuthorDate:    Tue, 16 Mar 2021 11:31:18 -04:00
> Committer:     Ingo Molnar <mingo@kernel.org>
> CommitterDate: Wed, 17 Mar 2021 09:56:46 +01:00
> 
> locking/ww_mutex: Treat ww_mutex_lock() like a trylock
> 
> It was found that running the ww_mutex_lock-torture test produced the
> following lockdep splat almost immediately:
> 
> [  103.892638] ======================================================
> [  103.892639] WARNING: possible circular locking dependency detected
> [  103.892641] 5.12.0-rc3-debug+ #2 Tainted: G S      W
> [  103.892643] ------------------------------------------------------
> [  103.892643] lock_torture_wr/3234 is trying to acquire lock:
> [  103.892646] ffffffffc0b35b10 (torture_ww_mutex_2.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892660]
> [  103.892660] but task is already holding lock:
> [  103.892661] ffffffffc0b35cd0 (torture_ww_mutex_0.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x3e2/0x720 [locktorture]
> [  103.892669]
> [  103.892669] which lock already depends on the new lock.
> [  103.892669]
> [  103.892670]
> [  103.892670] the existing dependency chain (in reverse order) is:
> [  103.892671]
> [  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
> [  103.892675]        lock_acquire+0x1c5/0x830
> [  103.892682]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892687]        ww_mutex_lock+0x4b/0x180
> [  103.892690]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892694]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892698]        kthread+0x35f/0x430
> [  103.892701]        ret_from_fork+0x1f/0x30
> [  103.892706]
> [  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
> [  103.892709]        lock_acquire+0x1c5/0x830
> [  103.892712]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892715]        ww_mutex_lock+0x4b/0x180
> [  103.892717]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892721]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892725]        kthread+0x35f/0x430
> [  103.892727]        ret_from_fork+0x1f/0x30
> [  103.892730]
> [  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:
> [  103.892733]        check_prevs_add+0x3fd/0x2470
> [  103.892736]        __lock_acquire+0x2602/0x3100
> [  103.892738]        lock_acquire+0x1c5/0x830
> [  103.892740]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892743]        ww_mutex_lock+0x4b/0x180
> [  103.892746]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892749]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892753]        kthread+0x35f/0x430
> [  103.892755]        ret_from_fork+0x1f/0x30
> [  103.892757]
> [  103.892757] other info that might help us debug this:
> [  103.892757]
> [  103.892758] Chain exists of:
> [  103.892758]   torture_ww_mutex_2.base --> torture_ww_mutex_1.base --> torture_ww_mutex_0.base
> [  103.892758]
> [  103.892763]  Possible unsafe locking scenario:
> [  103.892763]
> [  103.892764]        CPU0                    CPU1
> [  103.892765]        ----                    ----
> [  103.892765]   lock(torture_ww_mutex_0.base);
> [  103.892767] 				      lock(torture_ww_mutex_1.base);
> [  103.892770] 				      lock(torture_ww_mutex_0.base);
> [  103.892772]   lock(torture_ww_mutex_2.base);
> [  103.892774]
> [  103.892774]  *** DEADLOCK ***
> 
> Since ww_mutex is supposed to be deadlock-proof if used properly, such
> a deadlock scenario should not happen. To avoid this false-positive
> splat, treat ww_mutex_lock() like a trylock.
> 
> After applying this patch, the locktorture test can run for a long time
> without triggering the circular locking dependency splat.
> 
> Signed-off-by: Waiman Long <longman@redhat.com>
> Signed-off-by: Ingo Molnar <mingo@kernel.org>
> Acked-by: Davidlohr Bueso <dbueso@suse.de>
> Link: https://lore.kernel.org/r/20210316153119.13802-4-longman@redhat.com
> ---
>  kernel/locking/mutex.c | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index 622ebdf..bb89393 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -946,7 +946,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
>  	}
>  
>  	preempt_disable();
> -	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
> +	/*
> +	 * Treat as trylock for ww_mutex.
> +	 */
> +	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);

I'm confused... why isn't nest_lock working here?

For ww_mutex, we're supposed to have ctx->dep_map as a nest_lock, and
all lock acquisitions under a nest lock should be fine. Afaict the above
is just plain wrong.
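
For context, the nest_lock plumbing being discussed: ww_mutex_lock() is
supposed to hand the acquire context's dep_map down as the nest_lock for
each acquisition. A simplified sketch, paraphrased from the v5.12-era
kernel/locking/mutex.c rather than quoted verbatim:

	int __sched
	ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	{
		might_sleep();
		/*
		 * ctx->dep_map is passed down as nest_lock, so lockdep can
		 * fold all acquisitions under this ctx into a single
		 * held_lock entry (deadlock injection and error handling
		 * omitted here).
		 */
		return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
				       0, ctx ? &ctx->dep_map : NULL,
				       _RET_IP_, ctx);
	}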

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 4/4] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test
  2021-03-17  5:16   ` Davidlohr Bueso
@ 2021-03-17 13:21     ` Waiman Long
  0 siblings, 0 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-17 13:21 UTC (permalink / raw)
  To: Davidlohr Bueso
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Paul E. McKenney, linux-kernel, Juri Lelli

On 3/17/21 1:16 AM, Davidlohr Bueso wrote:
> On Tue, 16 Mar 2021, Waiman Long wrote:
>
>> The ww_acquire_ctx structure for ww_mutex needs to persist for a
>> complete lock/unlock cycle. In the ww_mutex test in locktorture,
>> however, both ww_acquire_init() and ww_acquire_fini() are called within
>> the lock function only. This causes a lockdep splat of "WARNING: Nested
>> lock was not taken" when lockdep is enabled in the kernel.
>>
>> To fix this problem, we need to move the ww_acquire_fini() after the
>> ww_mutex_unlock() in torture_ww_mutex_unlock(). In other words, we need
>> to pass state information from the lock function to the unlock function.
>
> Right, and afaict this _is_ the way ww_acquire_fini() should be called:
>
>  * Releases a w/w acquire context. This must be called _after_ all
>  * acquired w/w mutexes have been released with ww_mutex_unlock.
>
>> Change the writelock and writeunlock function prototypes to allow that
>> and change the torture_ww_mutex_lock() and torture_ww_mutex_unlock()
>> accordingly.
>
> But wouldn't just making ctx a global variable be enough instead? That
> way we don't deal with memory allocation for every lock/unlock operation
> (yuck). Plus the ENOMEM would need to be handled/propagated accordingly -
> the code really doesn't expect any failure from ->writelock().

The ctx should be per-thread to track potential locking conflicts. Since
there are as many locking threads as there are cpus, we can't use one
global variable to do that. I was thinking about using a per-cpu
variable, but locktorture kthreads aren't cpu-bound and can be shuffled
around, so that doesn't work either. That led me to the current scheme
of allocating at lock and freeing at unlock.

Another alternative is to add per-thread init/fini methods that allow
setting up a per-thread context which is then passed to the locking
functions (see the sketch below). By doing that, we only need one
kmalloc/kfree pair per running locktorture kthread.

Cheers,
Longman
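
A rough sketch of the per-thread init/fini idea floated above; the hook
names task_init/task_fini and the tstate plumbing are made up here for
illustration and are not part of the actual lock_torture_ops at this
point:

	/* hypothetical additions to struct lock_torture_ops */
	void *(*task_init)(void);		/* alloc per-kthread state once */
	void (*task_fini)(void *tstate);	/* free it when the kthread stops */
	int (*writelock)(void *tstate);		/* state threaded through the ops */
	void (*writeunlock)(void *tstate);

	/* hypothetical ww_mutex implementation of those hooks */
	static void *torture_ww_task_init(void)
	{
		/* one allocation per kthread instead of one per lock/unlock */
		return kmalloc(sizeof(struct ww_acquire_ctx), GFP_KERNEL);
	}

	static void torture_ww_task_fini(void *tstate)
	{
		kfree(tstate);
	}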



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 13:12     ` Peter Zijlstra
@ 2021-03-17 13:31       ` Peter Zijlstra
  2021-03-17 14:03         ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 13:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-tip-commits, Waiman Long, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 02:12:41PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 12:38:21PM -0000, tip-bot2 for Waiman Long wrote:
> > +	/*
> > +	 * Treat as trylock for ww_mutex.
> > +	 */
> > +	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);
> 
> I'm confused... why isn't nest_lock working here?
> 
> For ww_mutex, we're supposed to have ctx->dep_map as a nest_lock, and
> all lock acquisitions under a nest lock should be fine. Afaict the above
> is just plain wrong.

To clarify:

	mutex_lock(&A);			ww_mutex_lock(&B, ctx);
	ww_mutex_lock(&B, ctx);		mutex_lock(&A);

should still very much be a deadlock, but your 'fix' makes it not report
that.

Only order within the ww_ctx can be ignored, and that's exactly what
nest_lock should be doing.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 12:59     ` Peter Zijlstra
@ 2021-03-17 13:43       ` Waiman Long
  2021-03-17 13:55         ` Peter Zijlstra
  0 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-17 13:43 UTC (permalink / raw)
  To: Peter Zijlstra, linux-kernel
  Cc: linux-tip-commits, Ingo Molnar, Davidlohr Bueso, x86

On 3/17/21 8:59 AM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 12:38:22PM -0000, tip-bot2 for Waiman Long wrote:
>> The following commit has been merged into the locking/urgent branch of tip:
>>
>> Commit-ID:     5de2055d31ea88fd9ae9709ac95c372a505a60fa
>> Gitweb:        https://git.kernel.org/tip/5de2055d31ea88fd9ae9709ac95c372a505a60fa
>> Author:        Waiman Long <longman@redhat.com>
>> AuthorDate:    Tue, 16 Mar 2021 11:31:16 -04:00
>> Committer:     Ingo Molnar <mingo@kernel.org>
>> CommitterDate: Wed, 17 Mar 2021 09:56:44 +01:00
>>
>> locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
>>
>> The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
>> function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
>> combination is repetitive.
>>
>> In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
>> ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
>> clear ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
>> ww_ctx) by just (ww_ctx).
> The reason this code was like this is because GCC could constant
> propagate use_ww_ctx but could not do the same for ww_ctx (since that's
> external).
>
> Please double check generated code to make sure you've not introduced a
> bunch of extra branches.
>
I see, but this patch just replaces (use_ww_ctx && ww_ctx) by (ww_ctx).
Even if constant propagation isn't happening for ww_ctx, gcc shouldn't
generate any worse code wrt ww_ctx. It could be that the generated
machine code is more or less the same, but the source code will look
cleaner with just one variable in the conditional clauses.

Using gcc 8.4.1, the generated __mutex_lock function has the same size
(with the last instruction at offset +5179) with or without this patch.
Well, you can say that this patch is a no-op wrt generated code.

Cheers,
Longman
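
For anyone repeating that comparison, the function size can be read
straight out of the object file of a built tree; one way to do it
(standard binutils, nothing kernel-specific):

	$ nm --print-size --size-sort kernel/locking/mutex.o | grep __mutex_lock

The second column of the output is the symbol size in hex.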


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 13:43       ` Waiman Long
@ 2021-03-17 13:55         ` Peter Zijlstra
  2021-03-17 14:10           ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 13:55 UTC (permalink / raw)
  To: Waiman Long
  Cc: linux-kernel, linux-tip-commits, Ingo Molnar, Davidlohr Bueso, x86

On Wed, Mar 17, 2021 at 09:43:20AM -0400, Waiman Long wrote:

> Using gcc 8.4.1, the generated __mutex_lock function has the same size (with
> the last instruction at offset +5179) with or without this patch. Well, you
> can say that this patch is a no-op wrt generated code.

OK, then GCC has gotten better. Because back then I tried really hard
but it wouldn't remove the if (ww_ctx) branches unless I had that extra
const bool argument.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 13:31       ` Peter Zijlstra
@ 2021-03-17 14:03         ` Waiman Long
  2021-03-17 15:35           ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-17 14:03 UTC (permalink / raw)
  To: Peter Zijlstra, linux-kernel; +Cc: linux-tip-commits, Ingo Molnar, x86

On 3/17/21 9:31 AM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 02:12:41PM +0100, Peter Zijlstra wrote:
>> On Wed, Mar 17, 2021 at 12:38:21PM -0000, tip-bot2 for Waiman Long wrote:
>>> +	/*
>>> +	 * Treat as trylock for ww_mutex.
>>> +	 */
>>> +	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);
>> I'm confused... why isn't nest_lock working here?
>>
>> For ww_mutex, we're supposed to have ctx->dep_map as a nest_lock, and
>> all lock acquisitions under a nest lock should be fine. Afaict the above
>> is just plain wrong.
> To clarify:
>
> 	mutex_lock(&A);			ww_mutex_lock(&B, ctx);
> 	ww_mutex_lock(&B, ctx);		mutex_lock(&A);
>
> should still very much be a deadlock, but your 'fix' makes it not report
> that.
>
> Only order within the ww_ctx can be ignored, and that's exactly what
> nest_lock should be doing.
>
I will take a deeper look into why that is the case.

Cheers,
Longman


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 13:55         ` Peter Zijlstra
@ 2021-03-17 14:10           ` Waiman Long
  2021-03-17 14:17             ` Peter Zijlstra
  2021-03-17 14:33             ` Waiman Long
  0 siblings, 2 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-17 14:10 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: linux-kernel, linux-tip-commits, Ingo Molnar, Davidlohr Bueso, x86

On 3/17/21 9:55 AM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 09:43:20AM -0400, Waiman Long wrote:
>
>> Using gcc 8.4.1, the generated __mutex_lock function has the same size (with
>> the last instruction at offset +5179) with or without this patch. Well, you
>> can say that this patch is a no-op wrt generated code.
> OK, then GCC has gotten better. Because back then I tried really hard
> but it wouldn't remove the if (ww_ctx) branches unless I had that extra
> const bool argument.
>
I think ww_mutex was merged in 2013. That is almost 8 years ago. It
could still be the case that older gcc compilers may not optimize this
as well. I will try the RHEL7 gcc compiler (4.8.5) to see how it fares.

Cheers,
Longman


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 14:10           ` Waiman Long
@ 2021-03-17 14:17             ` Peter Zijlstra
  2021-03-17 14:33             ` Waiman Long
  1 sibling, 0 replies; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 14:17 UTC (permalink / raw)
  To: Waiman Long
  Cc: linux-kernel, linux-tip-commits, Ingo Molnar, Davidlohr Bueso, x86

On Wed, Mar 17, 2021 at 10:10:16AM -0400, Waiman Long wrote:
> On 3/17/21 9:55 AM, Peter Zijlstra wrote:
> > On Wed, Mar 17, 2021 at 09:43:20AM -0400, Waiman Long wrote:
> > 
> > > Using gcc 8.4.1, the generated __mutex_lock function has the same size (with
> > > the last instruction at offset +5179) with or without this patch. Well, you
> > > can say that this patch is a no-op wrt generated code.
> > OK, then GCC has gotten better. Because back then I tried really hard
> > but it wouldn't remove the if (ww_ctx) branches unless I had that extra
> > const bool argument.
> > 
> I think ww_mutex was merged in 2013. That is almost 8 years ago. It could
> still be the case that older gcc compilers may not optimize this as well.
> I will try the RHEL7 gcc compiler (4.8.5) to see how it fares.

I really don't care about the code generation quality of anything before
8-ish at this point. That's already an old compiler.

If you run on ancient compilers, you simply don't care about code
quality.

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
  2021-03-17 14:10           ` Waiman Long
  2021-03-17 14:17             ` Peter Zijlstra
@ 2021-03-17 14:33             ` Waiman Long
  1 sibling, 0 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-17 14:33 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: linux-kernel, linux-tip-commits, Ingo Molnar, Davidlohr Bueso, x86

On 3/17/21 10:10 AM, Waiman Long wrote:
> On 3/17/21 9:55 AM, Peter Zijlstra wrote:
>> On Wed, Mar 17, 2021 at 09:43:20AM -0400, Waiman Long wrote:
>>
>>> Using gcc 8.4.1, the generated __mutex_lock function has the same size
>>> (with the last instruction at offset +5179) with or without this patch.
>>> Well, you can say that this patch is a no-op wrt generated code.
>> OK, then GCC has gotten better. Because back then I tried really hard
>> but it wouldn't remove the if (ww_ctx) branches unless I had that extra
>> const bool argument.
>>
> I think ww_mutex was merged in 2013. That is almost 8 years ago. It
> could still be the case that older gcc compilers may not optimize this
> as well. I will try the RHEL7 gcc compiler (4.8.5) to see how it fares.

I got the same result with the 4.8.5 compiler. The __mutex_lock() 
function has the same size with or without the patch. Note that I used 
the debug config during my RHEL8 compiler test, so the generated code 
size is much larger. With the non-debug config that I used for the 4.8.5 
compiler test, the code is only about 1236 bytes.

Since the current Linux kernel requires gcc 4.9 or above (I downgraded 
the kernel to v5.4 for my 4.8.5 compiler test), the fear of generating 
inferior code due to this patch should be a moot point.

Cheers,
Longman


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 14:03         ` Waiman Long
@ 2021-03-17 15:35           ` Waiman Long
  2021-03-17 16:48             ` Peter Zijlstra
  0 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-17 15:35 UTC (permalink / raw)
  To: Peter Zijlstra, linux-kernel; +Cc: linux-tip-commits, Ingo Molnar, x86

On 3/17/21 10:03 AM, Waiman Long wrote:
> On 3/17/21 9:31 AM, Peter Zijlstra wrote:
>> On Wed, Mar 17, 2021 at 02:12:41PM +0100, Peter Zijlstra wrote:
>>> On Wed, Mar 17, 2021 at 12:38:21PM -0000, tip-bot2 for Waiman Long 
>>> wrote:
>>>> +    /*
>>>> +     * Treat as trylock for ww_mutex.
>>>> +     */
>>>> +    mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, 
>>>> nest_lock, ip);
>>> I'm confused... why isn't nest_lock working here?
>>>
>>> For ww_mutex, we're supposed to have ctx->dep_map as a nest_lock, and
>>> all lock acquisitions under a nest lock should be fine. Afaict the 
>>> above
>>> is just plain wrong.
>> To clarify:
>>
>>     mutex_lock(&A);            ww_mutex_lock(&B, ctx);
>>     ww_mutex_lock(&B, ctx);        mutex_lock(&A);
>>
>> should still very much be a deadlock, but your 'fix' makes it not report
>> that.
>>
>> Only order within the ww_ctx can be ignored, and that's exactly what
>> nest_lock should be doing.
>>
> I will take a deeper look into why that is the case. 

From reading the source code, the nest_lock check is done in
check_deadlock(), so that path won't complain (see the snippet below).
However, nest_lock isn't considered in check_noncircular(), which is what
causes the splat to come out. Maybe we should add a check for nest_lock
there. I will fiddle with the code to see if it can address the issue.

Cheers,
Longman
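
The asymmetry Longman describes, paraphrased and trimmed from
check_deadlock() in the v5.12-era kernel/locking/lockdep.c: holding the
nest_lock turns a same-class reacquisition into an allowed case, while
check_noncircular() walks the dependency graph without ever consulting
nest_lock:

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/* holding the nest_lock serializes this lock's nesting */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}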



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 15:35           ` Waiman Long
@ 2021-03-17 16:48             ` Peter Zijlstra
  2021-03-17 17:40               ` Peter Zijlstra
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 16:48 UTC (permalink / raw)
  To: Waiman Long; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 11:35:12AM -0400, Waiman Long wrote:

> From reading the source code, the nest_lock check is done in
> check_deadlock(), so that path won't complain. However, nest_lock isn't
> considered in check_noncircular(), which is what causes the splat to come
> out. Maybe we should add a check for nest_lock there. I will fiddle with
> the code to see if it can address the issue.

Nah, that's not how it's supposed to work. I think the problem is that
DEFINE_WW_MUTEX is buggered; there's not actually any other user of it
in-tree.

Everybody else (including locking-selftests) seems to be using
ww_mutex_init().

So all locks in a ww_class should have the same lock class, and then
nest_lock will fold them all into a single entry with ->references
incremented. See __lock_acquire().

But from the report:

> [  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
> [  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
> [  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:

that went sideways, they're *not* the same class.

I think you'll find that if you use ww_mutex_init() it'll all work. Let
me go and zap this patch, and then I'll try and figure out why
DEFINE_WW_MUTEX() is buggered.
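
The folding referred to above, paraphrased and trimmed from
__lock_acquire() in the v5.12-era kernel/locking/lockdep.c. Note the
hlock->class_idx == class_idx test: folding only happens when the held
lock and the new lock share a lock class, which is exactly what the
per-instance classes produced by DEFINE_WW_MUTEX() defeat:

	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			/*
			 * Same class under the same nest_lock: bump the
			 * reference count instead of adding a new entry.
			 */
			if (!references)
				references++;
			if (!hlock->references)
				hlock->references++;
			hlock->references += references;
			return 2;
		}
	}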

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 16:48             ` Peter Zijlstra
@ 2021-03-17 17:40               ` Peter Zijlstra
  2021-03-17 17:45                 ` Peter Zijlstra
  2021-03-17 18:14                 ` Waiman Long
  0 siblings, 2 replies; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 17:40 UTC (permalink / raw)
  To: Waiman Long; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 05:48:48PM +0100, Peter Zijlstra wrote:

> I think you'll find that if you use ww_mutex_init() it'll all work. Let
> me go and zap this patch, and then I'll try and figure out why
> DEFINE_WW_MUTEX() is buggered.

Moo, I can't get the compiler to do as I want :/

The below is so close but doesn't actually compile.. Maybe we should
just give up on DEFINE_WW_MUTEX and simply remove it.

---
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0cd631a19727..85f50538f26a 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -129,12 +129,15 @@ do {									\
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
 #endif
 
-#define __MUTEX_INITIALIZER(lockname) \
+#define ___MUTEX_INITIALIZER(lockname, depmap) \
 		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
-		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
+		depmap }
+
+#define __MUTEX_INITIALIZER(lockname) \
+		___MUTEX_INITIALIZER(lockname, __DEP_MAP_MUTEX_INITIALIZER(lockname))
 
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 6ecf2a0220db..c62a030652b4 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -50,9 +50,17 @@ struct ww_acquire_ctx {
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
-		, .ww_class = class
+		, .ww_class = &(class)
+
+# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
+		, .dep_map = { \
+			.key = &(class).mutex_key, \
+			.name = (class).mutex_name, \
+			.wait_type_inner = LD_WAIT_SLEEP, \
+		}
 #else
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
+# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class)
 #endif
 
 #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)	    \
@@ -62,7 +70,8 @@ struct ww_acquire_ctx {
 		, .is_wait_die = _is_wait_die }
 
 #define __WW_MUTEX_INITIALIZER(lockname, class) \
-		{ .base =  __MUTEX_INITIALIZER(lockname.base) \
+		{ .base =  ___MUTEX_INITIALIZER(lockname.base, \
+			__DEP_MAP_WW_MUTEX_INITIALIZER(lockname.base, class)) \
 		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
 
 #define DEFINE_WD_CLASS(classname) \
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0ab94e1f1276..e8305233eb0b 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -358,9 +358,9 @@ static struct lock_torture_ops mutex_lock_ops = {
 
 #include <linux/ww_mutex.h>
 static DEFINE_WD_CLASS(torture_ww_class);
-static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
-static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
-static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_0, torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_1, torture_ww_class);
+static DEFINE_WW_MUTEX(torture_ww_mutex_2, torture_ww_class);
 
 static int torture_ww_mutex_lock(void)
 __acquires(torture_ww_mutex_0)

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 17:40               ` Peter Zijlstra
@ 2021-03-17 17:45                 ` Peter Zijlstra
  2021-03-17 18:32                   ` Waiman Long
  2021-03-17 18:14                 ` Waiman Long
  1 sibling, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 17:45 UTC (permalink / raw)
  To: Waiman Long; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 06:40:27PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 05:48:48PM +0100, Peter Zijlstra wrote:
> 
> > I think you'll find that if you use ww_mutex_init() it'll all work. Let
> > me go and zap this patch, and then I'll try and figure out why
> > DEFINE_WW_MUTEX() is buggered.
> 
> Moo, I can't get the compiler to do as I want :/
> 
> The below is so close but doesn't actually compile.. Maybe we should
> just give up on DEFINE_WW_MUTEX and simply remove it.
> 
> ---
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 0cd631a19727..85f50538f26a 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -129,12 +129,15 @@ do {									\
>  # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
>  #endif
>  
> -#define __MUTEX_INITIALIZER(lockname) \
> +#define ___MUTEX_INITIALIZER(lockname, depmap) \
>  		{ .owner = ATOMIC_LONG_INIT(0) \
>  		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
>  		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
>  		__DEBUG_MUTEX_INITIALIZER(lockname) \
> -		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
> +		depmap }
> +
> +#define __MUTEX_INITIALIZER(lockname) \
> +		___MUTEX_INITIALIZER(lockname, __DEP_MAP_MUTEX_INITIALIZER(lockname))
>  
>  #define DEFINE_MUTEX(mutexname) \
>  	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
> diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
> index 6ecf2a0220db..c62a030652b4 100644
> --- a/include/linux/ww_mutex.h
> +++ b/include/linux/ww_mutex.h
> @@ -50,9 +50,17 @@ struct ww_acquire_ctx {
>  
>  #ifdef CONFIG_DEBUG_LOCK_ALLOC
>  # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
> -		, .ww_class = class
> +		, .ww_class = &(class)
> +
> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
> +		, .dep_map = { \
> +			.key = &(class).mutex_key, \
> +			.name = (class).mutex_name, \

			.name = #class "_mutex", \

and it 'works', but shees!

> +			.wait_type_inner = LD_WAIT_SLEEP, \
> +		}
>  #else
>  # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class)
>  #endif
>  
>  #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)	    \
> @@ -62,7 +70,8 @@ struct ww_acquire_ctx {
>  		, .is_wait_die = _is_wait_die }
>  
>  #define __WW_MUTEX_INITIALIZER(lockname, class) \
> -		{ .base =  __MUTEX_INITIALIZER(lockname.base) \
> +		{ .base =  ___MUTEX_INITIALIZER(lockname.base, \
> +			__DEP_MAP_WW_MUTEX_INITIALIZER(lockname.base, class)) \
>  		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
>  
>  #define DEFINE_WD_CLASS(classname) \
> diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
> index 0ab94e1f1276..e8305233eb0b 100644
> --- a/kernel/locking/locktorture.c
> +++ b/kernel/locking/locktorture.c
> @@ -358,9 +358,9 @@ static struct lock_torture_ops mutex_lock_ops = {
>  
>  #include <linux/ww_mutex.h>
>  static DEFINE_WD_CLASS(torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_0, torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_1, torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_2, torture_ww_class);
>  
>  static int torture_ww_mutex_lock(void)
>  __acquires(torture_ww_mutex_0)

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 17:40               ` Peter Zijlstra
  2021-03-17 17:45                 ` Peter Zijlstra
@ 2021-03-17 18:14                 ` Waiman Long
  1 sibling, 0 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-17 18:14 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On 3/17/21 1:40 PM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 05:48:48PM +0100, Peter Zijlstra wrote:
>
>> I think you'll find that if you use ww_mutex_init() it'll all work. Let
>> me go and zap this patch, and then I'll try and figure out why
>> DEFINE_WW_MUTEX() is buggered.
> Moo, I can't get the compiler to do as I want :/
>
> The below is so close but doesn't actually compile.. Maybe we should
> just give up on DEFINE_WW_MUTEX and simply remove it.
>
> ---
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 0cd631a19727..85f50538f26a 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -129,12 +129,15 @@ do {									\
>   # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
>   #endif
>   
> -#define __MUTEX_INITIALIZER(lockname) \
> +#define ___MUTEX_INITIALIZER(lockname, depmap) \
>   		{ .owner = ATOMIC_LONG_INIT(0) \
>   		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
>   		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
>   		__DEBUG_MUTEX_INITIALIZER(lockname) \
> -		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
> +		depmap }
> +
> +#define __MUTEX_INITIALIZER(lockname) \
> +		___MUTEX_INITIALIZER(lockname, __DEP_MAP_MUTEX_INITIALIZER(lockname))
>   
>   #define DEFINE_MUTEX(mutexname) \
>   	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
> diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
> index 6ecf2a0220db..c62a030652b4 100644
> --- a/include/linux/ww_mutex.h
> +++ b/include/linux/ww_mutex.h
> @@ -50,9 +50,17 @@ struct ww_acquire_ctx {
>   
>   #ifdef CONFIG_DEBUG_LOCK_ALLOC
>   # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
> -		, .ww_class = class
> +		, .ww_class = &(class)
> +
> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
> +		, .dep_map = { \
> +			.key = &(class).mutex_key, \
> +			.name = (class).mutex_name, \
> +			.wait_type_inner = LD_WAIT_SLEEP, \
> +		}
>   #else
>   # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class)
>   #endif
>   
>   #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)	    \
> @@ -62,7 +70,8 @@ struct ww_acquire_ctx {
>   		, .is_wait_die = _is_wait_die }
>   
>   #define __WW_MUTEX_INITIALIZER(lockname, class) \
> -		{ .base =  __MUTEX_INITIALIZER(lockname.base) \
> +		{ .base =  ___MUTEX_INITIALIZER(lockname.base, \
> +			__DEP_MAP_WW_MUTEX_INITIALIZER(lockname.base, class)) \
>   		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
>   
>   #define DEFINE_WD_CLASS(classname) \
> diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
> index 0ab94e1f1276..e8305233eb0b 100644
> --- a/kernel/locking/locktorture.c
> +++ b/kernel/locking/locktorture.c
> @@ -358,9 +358,9 @@ static struct lock_torture_ops mutex_lock_ops = {
>   
>   #include <linux/ww_mutex.h>
>   static DEFINE_WD_CLASS(torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
> -static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_0, torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_1, torture_ww_class);
> +static DEFINE_WW_MUTEX(torture_ww_mutex_2, torture_ww_class);
>   
>   static int torture_ww_mutex_lock(void)
>   __acquires(torture_ww_mutex_0)
>
I changed locktorture to use ww_mutex_init() and the lockdep splat was
indeed gone. So zapping DEFINE_WW_MUTEX() and using ww_mutex_init() is a
possible solution (a sketch follows below). I will send out a patch to
do that.

Thanks,
Longman
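
A minimal sketch of that conversion, assuming a one-time init step in
kernel/locking/locktorture.c (the function name below is hypothetical;
the eventual patch may plumb it differently):

	static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1,
			       torture_ww_mutex_2;

	/* hypothetical: run once before the torture kthreads start */
	static void torture_ww_mutex_init(void)
	{
		/*
		 * ww_mutex_init() registers torture_ww_class's key for each
		 * mutex, so all three end up in the same lockdep class.
		 */
		ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
		ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
		ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
	}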


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 17:45                 ` Peter Zijlstra
@ 2021-03-17 18:32                   ` Waiman Long
  2021-03-17 19:58                     ` Peter Zijlstra
  0 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-17 18:32 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On 3/17/21 1:45 PM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 06:40:27PM +0100, Peter Zijlstra wrote:
>> On Wed, Mar 17, 2021 at 05:48:48PM +0100, Peter Zijlstra wrote:
>>
>>> I think you'll find that if you use ww_mutex_init() it'll all work. Let
>>> me go and zap this patch, and then I'll try and figure out why
>>> DEFINE_WW_MUTEX() is buggered.
>> Moo, I can't get the compiler to do as I want :/
>>
>> The below is so close but doesn't actually compile.. Maybe we should
>> just give up on DEFINE_WW_MUTEX and simply remove it.
>>
>> ---
>> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
>> index 0cd631a19727..85f50538f26a 100644
>> --- a/include/linux/mutex.h
>> +++ b/include/linux/mutex.h
>> @@ -129,12 +129,15 @@ do {									\
>>   # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
>>   #endif
>>   
>> -#define __MUTEX_INITIALIZER(lockname) \
>> +#define ___MUTEX_INITIALIZER(lockname, depmap) \
>>   		{ .owner = ATOMIC_LONG_INIT(0) \
>>   		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
>>   		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
>>   		__DEBUG_MUTEX_INITIALIZER(lockname) \
>> -		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
>> +		depmap }
>> +
>> +#define __MUTEX_INITIALIZER(lockname) \
>> +		___MUTEX_INITIALIZER(lockname, __DEP_MAP_MUTEX_INITIALIZER(lockname))
>>   
>>   #define DEFINE_MUTEX(mutexname) \
>>   	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
>> diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
>> index 6ecf2a0220db..c62a030652b4 100644
>> --- a/include/linux/ww_mutex.h
>> +++ b/include/linux/ww_mutex.h
>> @@ -50,9 +50,17 @@ struct ww_acquire_ctx {
>>   
>>   #ifdef CONFIG_DEBUG_LOCK_ALLOC
>>   # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
>> -		, .ww_class = class
>> +		, .ww_class = &(class)
>> +
>> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
>> +		, .dep_map = { \
>> +			.key = &(class).mutex_key, \
>> +			.name = (class).mutex_name, \
> 			.name = #class "_mutex", \
>
> and it 'works', but shees!

The name string itself may be duplicated for multiple instances of
DEFINE_WW_MUTEX(). Do you want to keep DEFINE_WW_MUTEX() or just use
ww_mutex_init() for all?

I notice that the problem with DEFINE_WW_MUTEX is that the ww_mutexes
themselves have a NULL key instead of the same key from the ww_class as
they get with ww_mutex_init().

Cheers,
Longman



^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 18:32                   ` Waiman Long
@ 2021-03-17 19:58                     ` Peter Zijlstra
  2021-03-17 20:20                       ` Waiman Long
  0 siblings, 1 reply; 32+ messages in thread
From: Peter Zijlstra @ 2021-03-17 19:58 UTC (permalink / raw)
  To: Waiman Long; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On Wed, Mar 17, 2021 at 02:32:27PM -0400, Waiman Long wrote:
> On 3/17/21 1:45 PM, Peter Zijlstra wrote:

> > > +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
> > > +		, .dep_map = { \
> > > +			.key = &(class).mutex_key, \
> > > +			.name = (class).mutex_name, \
> > 			,name = #class "_mutex", \
> > 
> > and it 'works', but shees!
> 
> The name string itself may be duplicated for multiple instances of
> DEFINE_WW_MUTEX(). Do you want to keep DEFINE_WW_MUTEX() or just use
> ww_mutex_init() for all?

So linkers can merge literals, but there's no guarantee. But yeah, let's
just kill the thing; less tricky macro crud to worry about.

> I notice that the problem with DEFINE_WW_MUTEX is that the ww_mutexes
> themselves have a NULL key instead of the same key from the ww_class as
> they get with ww_mutex_init().

Correct.
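
The NULL key matters because of lockdep's fallback for statically
allocated locks: when no key was supplied, the lock's own address becomes
the class key, so every DEFINE_WW_MUTEX() instance lands in a class of
its own. Paraphrased and trimmed from assign_lock_key() in
kernel/locking/lockdep.c:

	if (static_obj(lock)) {
		/* static lock with a NULL key: its own address is the key */
		lock->key = (void *)lock;
	} else {
		/* dynamic locks must register a persistent key instead,
		 * which is what ww_mutex_init() ends up doing */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
	}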

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [tip: locking/urgent] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-17 19:58                     ` Peter Zijlstra
@ 2021-03-17 20:20                       ` Waiman Long
  0 siblings, 0 replies; 32+ messages in thread
From: Waiman Long @ 2021-03-17 20:20 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, linux-tip-commits, Ingo Molnar, x86

On 3/17/21 3:58 PM, Peter Zijlstra wrote:
> On Wed, Mar 17, 2021 at 02:32:27PM -0400, Waiman Long wrote:
>> On 3/17/21 1:45 PM, Peter Zijlstra wrote:
>>>> +# define __DEP_MAP_WW_MUTEX_INITIALIZER(lockname, class) \
>>>> +		, .dep_map = { \
>>>> +			.key = &(class).mutex_key, \
>>>> +			.name = (class).mutex_name, \
>>> 			.name = #class "_mutex", \
>>>
>>> and it 'works', but shees!
>> The name string itself may be duplicated for multiple instances of
>> DEFINE_WW_MUTEX(). Do you want to keep DEFINE_WW_MUTEX() or just use
>> ww_mutex_init() for all?
> So linkers can merge literals, but there's no guarantee. But yeah, let's
> just kill the thing; less tricky macro crud to worry about.
>
Good, just wanted to confirm the right way to move forward.

Cheers,
Longman


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-16 15:31 ` [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock Waiman Long
  2021-03-17  3:01   ` Davidlohr Bueso
  2021-03-17 12:38   ` [tip: locking/urgent] " tip-bot2 for Waiman Long
@ 2021-03-18  2:24   ` Boqun Feng
  2021-03-18  2:54     ` Waiman Long
  2 siblings, 1 reply; 32+ messages in thread
From: Boqun Feng @ 2021-03-18  2:24 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Paul E. McKenney,
	Davidlohr Bueso, linux-kernel, Juri Lelli

Hi Waiman,

Just a question out of curiosity: how did this problem stay hidden for so
long? ;-) Because IIUC, both locktorture and ww_mutex_lock have been there
for a while, so why didn't we spot this earlier?

I ask just to make sure we aren't introducing the problem because of some
subtle problem in lock(dep).

Regards,
Boqun

On Tue, Mar 16, 2021 at 11:31:18AM -0400, Waiman Long wrote:
> It was found that running the ww_mutex_lock-torture test produced the
> following lockdep splat almost immediately:
> 
> [  103.892638] ======================================================
> [  103.892639] WARNING: possible circular locking dependency detected
> [  103.892641] 5.12.0-rc3-debug+ #2 Tainted: G S      W
> [  103.892643] ------------------------------------------------------
> [  103.892643] lock_torture_wr/3234 is trying to acquire lock:
> [  103.892646] ffffffffc0b35b10 (torture_ww_mutex_2.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892660]
> [  103.892660] but task is already holding lock:
> [  103.892661] ffffffffc0b35cd0 (torture_ww_mutex_0.base){+.+.}-{3:3}, at: torture_ww_mutex_lock+0x3e2/0x720 [locktorture]
> [  103.892669]
> [  103.892669] which lock already depends on the new lock.
> [  103.892669]
> [  103.892670]
> [  103.892670] the existing dependency chain (in reverse order) is:
> [  103.892671]
> [  103.892671] -> #2 (torture_ww_mutex_0.base){+.+.}-{3:3}:
> [  103.892675]        lock_acquire+0x1c5/0x830
> [  103.892682]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892687]        ww_mutex_lock+0x4b/0x180
> [  103.892690]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892694]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892698]        kthread+0x35f/0x430
> [  103.892701]        ret_from_fork+0x1f/0x30
> [  103.892706]
> [  103.892706] -> #1 (torture_ww_mutex_1.base){+.+.}-{3:3}:
> [  103.892709]        lock_acquire+0x1c5/0x830
> [  103.892712]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892715]        ww_mutex_lock+0x4b/0x180
> [  103.892717]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892721]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892725]        kthread+0x35f/0x430
> [  103.892727]        ret_from_fork+0x1f/0x30
> [  103.892730]
> [  103.892730] -> #0 (torture_ww_mutex_2.base){+.+.}-{3:3}:
> [  103.892733]        check_prevs_add+0x3fd/0x2470
> [  103.892736]        __lock_acquire+0x2602/0x3100
> [  103.892738]        lock_acquire+0x1c5/0x830
> [  103.892740]        __ww_mutex_lock.constprop.15+0x1d1/0x2e50
> [  103.892743]        ww_mutex_lock+0x4b/0x180
> [  103.892746]        torture_ww_mutex_lock+0x316/0x720 [locktorture]
> [  103.892749]        lock_torture_writer+0x142/0x3a0 [locktorture]
> [  103.892753]        kthread+0x35f/0x430
> [  103.892755]        ret_from_fork+0x1f/0x30
> [  103.892757]
> [  103.892757] other info that might help us debug this:
> [  103.892757]
> [  103.892758] Chain exists of:
> [  103.892758]   torture_ww_mutex_2.base --> torture_ww_mutex_1.base --> torture_ww_mutex_0.base
> [  103.892758]
> [  103.892763]  Possible unsafe locking scenario:
> [  103.892763]
> [  103.892764]        CPU0                    CPU1
> [  103.892765]        ----                    ----
> [  103.892765]   lock(torture_ww_mutex_0.base);
> [  103.892767] 				      lock(torture_ww_mutex_1.base);
> [  103.892770] 				      lock(torture_ww_mutex_0.base);
> [  103.892772]   lock(torture_ww_mutex_2.base);
> [  103.892774]
> [  103.892774]  *** DEADLOCK ***
> 
> Since ww_mutex is supposed to be deadlock-proof if used properly, such a
> deadlock scenario should not happen. To avoid this false positive splat,
> treat ww_mutex_lock() like a trylock.
> 
> After applying this patch, the locktorture test can run for a long time
> without triggering the circular locking dependency splat.
> 
> Signed-off-by: Waiman Long <longman@redhat.com>
> ---
>  kernel/locking/mutex.c | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index 622ebdfcd083..bb89393cd3a2 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -946,7 +946,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
>  	}
>  
>  	preempt_disable();
> -	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
> +	/*
> +	 * Treat as trylock for ww_mutex.
> +	 */
> +	mutex_acquire_nest(&lock->dep_map, subclass, !!ww_ctx, nest_lock, ip);
>  
>  	if (__mutex_trylock(lock) ||
>  	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
> -- 
> 2.18.1
> 

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-18  2:24   ` [PATCH 3/4] " Boqun Feng
@ 2021-03-18  2:54     ` Waiman Long
  2021-03-18  6:36       ` Boqun Feng
  0 siblings, 1 reply; 32+ messages in thread
From: Waiman Long @ 2021-03-18  2:54 UTC (permalink / raw)
  To: Boqun Feng
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Paul E. McKenney,
	Davidlohr Bueso, linux-kernel, Juri Lelli

On 3/17/21 10:24 PM, Boqun Feng wrote:
> Hi Waiman,
>
> Just a question out of curiosity: how did this problem stay hidden for so
> long? ;-) Because IIUC, both locktorture and ww_mutex_lock have been there
> for a while, so why didn't we spot this earlier?
>
> I ask just to make sure we aren't introducing the problem because of some
> subtle problem in lock(dep).
>
You have to explicitly specify ww_mutex in the locktorture module
parameter to run the test (an example invocation follows below).
ww_mutex is usually not the intended target of testing, as there aren't
that many places that use it. Even if someone runs it, it's probably not
on a debug kernel.

Our QA people tried to run locktorture on ww_mutex and discovered this.

Cheers,
Longman
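
For reference, the opt-in described above is locktorture's torture_type
module parameter; assuming the ops name "ww_mutex_lock" from
kernel/locking/locktorture.c, a run on a lockdep-enabled kernel would
look like:

	modprobe locktorture torture_type=ww_mutex_lock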


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 3/4] locking/ww_mutex: Treat ww_mutex_lock() like a trylock
  2021-03-18  2:54     ` Waiman Long
@ 2021-03-18  6:36       ` Boqun Feng
  0 siblings, 0 replies; 32+ messages in thread
From: Boqun Feng @ 2021-03-18  6:36 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Paul E. McKenney,
	Davidlohr Bueso, linux-kernel, Juri Lelli

On Wed, Mar 17, 2021 at 10:54:17PM -0400, Waiman Long wrote:
> On 3/17/21 10:24 PM, Boqun Feng wrote:
> > Hi Waiman,
> > 
> > Just a question out of curiosity: how did this problem stay hidden for
> > so long? ;-) Because IIUC, both locktorture and ww_mutex_lock have been
> > there for a while, so why didn't we spot this earlier?
> > 
> > I ask just to make sure we aren't introducing the problem because of
> > some subtle problem in lock(dep).
> > 
> You have to explicitly specify ww_mutex in the locktorture module
> parameter to run the test. ww_mutex is usually not the intended target of
> testing, as there aren't that many places that use it. Even if someone
> runs it, it's probably not on a debug kernel.
> 
> Our QA people tried to run locktorture on ww_mutex and discovered this.
> 

Got it. Thanks ;-)

Regards,
Boqun

> Cheers,
> Longman
> 

^ permalink raw reply	[flat|nested] 32+ messages in thread
