* [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes
@ 2021-06-30 15:35 Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg() Peter Zijlstra
                   ` (5 more replies)
  0 siblings, 6 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 15:35 UTC (permalink / raw)
  To: mingo, longman, boqun.feng, will; +Cc: linux-kernel, peterz, yanfei.xu

Hi,

Clean up and fix a few issues reported by Yanfei.

Waiman, I didn't preserve your Reviewed-by, because there are a few extra
changes, and I've not yet boot-tested them.



* [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg()
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
@ 2021-06-30 15:35 ` Peter Zijlstra
  2021-07-05 11:59   ` Xu, Yanfei
  2021-07-08  8:42   ` [tip: locking/core] locking/mutex: Use try_cmpxchg() tip-bot2 for Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 2/4] locking/mutex: Fix HANDOFF condition Peter Zijlstra
                   ` (4 subsequent siblings)
  5 siblings, 2 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 15:35 UTC (permalink / raw)
  To: mingo, longman, boqun.feng, will; +Cc: linux-kernel, peterz, yanfei.xu

For simpler and better code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/mutex.c |   27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -100,7 +100,7 @@ static inline struct task_struct *__mute
 
 	owner = atomic_long_read(&lock->owner);
 	for (;;) { /* must loop, can race against a flag */
-		unsigned long old, flags = __owner_flags(owner);
+		unsigned long flags = __owner_flags(owner);
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
@@ -124,11 +124,8 @@ static inline struct task_struct *__mute
 		 */
 		flags &= ~MUTEX_FLAG_HANDOFF;
 
-		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
-		if (old == owner)
+		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
 			return NULL;
-
-		owner = old;
 	}
 
 	return __owner_task(owner);
@@ -168,10 +165,7 @@ static __always_inline bool __mutex_unlo
 {
 	unsigned long curr = (unsigned long)current;
 
-	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-		return true;
-
-	return false;
+	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 }
 #endif
 
@@ -226,7 +220,7 @@ static void __mutex_handoff(struct mutex
 	unsigned long owner = atomic_long_read(&lock->owner);
 
 	for (;;) {
-		unsigned long old, new;
+		unsigned long new;
 
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
@@ -238,11 +232,8 @@ static void __mutex_handoff(struct mutex
 		if (task)
 			new |= MUTEX_FLAG_PICKUP;
 
-		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
-		if (old == owner)
+		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
 			break;
-
-		owner = old;
 	}
 }
 
@@ -1237,8 +1228,6 @@ static noinline void __sched __mutex_unl
 	 */
 	owner = atomic_long_read(&lock->owner);
 	for (;;) {
-		unsigned long old;
-
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
@@ -1247,16 +1236,12 @@ static noinline void __sched __mutex_unl
 		if (owner & MUTEX_FLAG_HANDOFF)
 			break;
 
-		old = atomic_long_cmpxchg_release(&lock->owner, owner,
-						  __owner_flags(owner));
-		if (old == owner) {
+		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
 			if (owner & MUTEX_FLAG_WAITERS)
 				break;
 
 			return;
 		}
-
-		owner = old;
 	}
 
 	spin_lock(&lock->wait_lock);
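
[ Note on try_cmpxchg() semantics, since the shortened loops above
  depend on it: on failure, try_cmpxchg() writes the value it actually
  observed back through the old-value pointer, so the caller's retry
  loop continues with a fresh value.  A minimal sketch in terms of the
  plain cmpxchg() it replaces -- illustrative only, the real kernel
  primitives are generated per architecture:

	static inline bool
	sketch_try_cmpxchg_acquire(atomic_long_t *ptr, unsigned long *oldp,
				   unsigned long new)
	{
		unsigned long seen;

		/* Atomically: if (*ptr == *oldp) *ptr = new; returns old *ptr. */
		seen = atomic_long_cmpxchg_acquire(ptr, *oldp, new);
		if (seen == *oldp)
			return true;	/* exchange happened */

		*oldp = seen;		/* failed: report the value observed */
		return false;		/* caller retries with updated *oldp */
	}
  ]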




* [RFC][PATCH 2/4] locking/mutex: Fix HANDOFF condition
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg() Peter Zijlstra
@ 2021-06-30 15:35 ` Peter Zijlstra
  2021-07-08  8:42   ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff() Peter Zijlstra
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 15:35 UTC (permalink / raw)
  To: mingo, longman, boqun.feng, will; +Cc: linux-kernel, peterz, yanfei.xu

Yanfei reported that setting HANDOFF should not depend on recomputing
@first, only on @first state. Which would then give:

  if (ww_ctx || !first)
    first = __mutex_waiter_is_first(lock, &waiter);
  if (first)
    __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

But because 'ww_ctx || !first' is basically 'always' and the test for
first is relatively cheap, omit that first branch entirely.

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/mutex.c |   15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -919,7 +919,6 @@ __mutex_lock_common(struct mutex *lock,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct mutex_waiter waiter;
-	bool first = false;
 	struct ww_mutex *ww;
 	int ret;
 
@@ -998,6 +997,8 @@ __mutex_lock_common(struct mutex *lock,
 
 	set_current_state(state);
 	for (;;) {
+		bool first;
+
 		/*
 		 * Once we hold wait_lock, we're serialized against
 		 * mutex_unlock() handing the lock off to us, do a trylock
@@ -1026,15 +1027,9 @@ __mutex_lock_common(struct mutex *lock,
 		spin_unlock(&lock->wait_lock);
 		schedule_preempt_disabled();
 
-		/*
-		 * ww_mutex needs to always recheck its position since its waiter
-		 * list is not FIFO ordered.
-		 */
-		if (ww_ctx || !first) {
-			first = __mutex_waiter_is_first(lock, &waiter);
-			if (first)
-				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
-		}
+		first = __mutex_waiter_is_first(lock, &waiter);
+		if (first)
+			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*




* [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff()
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg() Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 2/4] locking/mutex: Fix HANDOFF condition Peter Zijlstra
@ 2021-06-30 15:35 ` Peter Zijlstra
  2021-06-30 16:30   ` Waiman Long
  2021-07-08  8:42   ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  2021-06-30 15:35 ` [RFC][PATCH 4/4] locking/mutex: Add MUTEX_WARN_ON Peter Zijlstra
                   ` (2 subsequent siblings)
  5 siblings, 2 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 15:35 UTC (permalink / raw)
  To: mingo, longman, boqun.feng, will; +Cc: linux-kernel, peterz, yanfei.xu

Yanfei reported that it is possible to lose HANDOFF when we race with
mutex_unlock() and end up setting HANDOFF on an unlocked mutex. At
that point anybody can steal it, losing HANDOFF in the process.

If this happens often enough, we can in fact starve the top waiter.

Solve this by folding the 'set HANDOFF' operation into the trylock
operation, such that either we acquire the lock, or it gets HANDOFF
set. This avoids having HANDOFF set on an unlocked mutex.
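
[ The race, spelled out as one possible interleaving; CPU numbers are
  illustrative:

	CPU0 (top waiter)          CPU1 (owner)              CPU2
	-----------------          ------------              ----
	                           mutex_unlock();
	                           /* lock->owner == 0 */
	__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
	/* HANDOFF is now set on an unlocked mutex */
	                                                     __mutex_trylock();
	                                                     /* steals the lock,
	                                                        clearing HANDOFF */

  The top waiter must then re-queue and set HANDOFF all over again;
  repeated often enough, it starves. ]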

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/mutex.c |   58 +++++++++++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 23 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -91,10 +91,7 @@ static inline unsigned long __owner_flag
 	return owner & MUTEX_FLAGS;
 }
 
-/*
- * Trylock variant that returns the owning task on failure.
- */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
 	unsigned long owner, curr = (unsigned long)current;
 
@@ -104,39 +101,56 @@ static inline struct task_struct *__mute
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr))
+			if (flags & MUTEX_FLAG_PICKUP) {
+				if (task != curr)
+					break;
+				flags &= ~MUTEX_FLAG_HANDOFF;
+			} else if (handoff) {
+				if (flags & MUTEX_FLAG_HANDOFF)
+					break;
+				flags |= MUTEX_FLAG_HANDOFF;
+			} else {
 				break;
-
-			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
-				break;
-
-			flags &= ~MUTEX_FLAG_PICKUP;
+			}
 		} else {
 #ifdef CONFIG_DEBUG_MUTEXES
 			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
 #endif
+			task = curr;
 		}
 
-		/*
-		 * We set the HANDOFF bit, we must make sure it doesn't live
-		 * past the point where we acquire it. This would be possible
-		 * if we (accidentally) set the bit on an unlocked mutex.
-		 */
-		flags &= ~MUTEX_FLAG_HANDOFF;
-
-		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
-			return NULL;
+		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
+			if (task == curr)
+				return NULL;
+			break;
+		}
 	}
 
 	return __owner_task(owner);
 }
 
 /*
+ * Trylock or set HANDOFF
+ */
+static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
+{
+	return !__mutex_trylock_common(lock, handoff);
+}
+
+/*
+ * Trylock variant that returns the owning task on failure.
+ */
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+{
+	return __mutex_trylock_common(lock, false);
+}
+
+/*
  * Actual trylock that will work on any unlocked state.
  */
 static inline bool __mutex_trylock(struct mutex *lock)
 {
-	return !__mutex_trylock_or_owner(lock);
+	return !__mutex_trylock_common(lock, false);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -1028,8 +1042,6 @@ __mutex_lock_common(struct mutex *lock,
 		schedule_preempt_disabled();
 
 		first = __mutex_waiter_is_first(lock, &waiter);
-		if (first)
-			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*
@@ -1037,7 +1049,7 @@ __mutex_lock_common(struct mutex *lock,
 		 * state back to RUNNING and fall through the next schedule(),
 		 * or we must see its unlock and acquire.
 		 */
-		if (__mutex_trylock(lock) ||
+		if (__mutex_trylock_or_handoff(lock, first) ||
 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 




* [RFC][PATCH 4/4] locking/mutex: Add MUTEX_WARN_ON
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
                   ` (2 preceding siblings ...)
  2021-06-30 15:35 ` [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff() Peter Zijlstra
@ 2021-06-30 15:35 ` Peter Zijlstra
  2021-07-08  8:42   ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  2021-06-30 19:03 ` [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Waiman Long
  2021-07-01  2:11 ` Xu, Yanfei
  5 siblings, 1 reply; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 15:35 UTC (permalink / raw)
  To: mingo, longman, boqun.feng, will; +Cc: linux-kernel, peterz, yanfei.xu

Clean up some #ifdef'fery.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/mutex.c |   30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -32,8 +32,10 @@
 
 #ifdef CONFIG_DEBUG_MUTEXES
 # include "mutex-debug.h"
+# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
 #else
 # include "mutex.h"
+# define MUTEX_WARN_ON(cond)
 #endif
 
 void
@@ -113,9 +115,7 @@ static inline struct task_struct *__mute
 				break;
 			}
 		} else {
-#ifdef CONFIG_DEBUG_MUTEXES
-			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
-#endif
+			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 			task = curr;
 		}
 
@@ -236,10 +236,8 @@ static void __mutex_handoff(struct mutex
 	for (;;) {
 		unsigned long new;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+		MUTEX_WARN_ON(__owner_task(owner) != current);
+		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
 		new = (owner & MUTEX_FLAG_WAITERS);
 		new |= (unsigned long)task;
@@ -764,9 +762,7 @@ void __sched ww_mutex_unlock(struct ww_m
 	 * into 'unlocked' state:
 	 */
 	if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
+		MUTEX_WARN_ON(!lock->ctx->acquired);
 		if (lock->ctx->acquired > 0)
 			lock->ctx->acquired--;
 		lock->ctx = NULL;
@@ -941,9 +937,7 @@ __mutex_lock_common(struct mutex *lock,
 
 	might_sleep();
 
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+	MUTEX_WARN_ON(lock->magic != lock);
 
 	ww = container_of(lock, struct ww_mutex, base);
 	if (ww_ctx) {
@@ -1235,10 +1229,8 @@ static noinline void __sched __mutex_unl
 	 */
 	owner = atomic_long_read(&lock->owner);
 	for (;;) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+		MUTEX_WARN_ON(__owner_task(owner) != current);
+		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
 		if (owner & MUTEX_FLAG_HANDOFF)
 			break;
@@ -1404,9 +1396,7 @@ int __sched mutex_trylock(struct mutex *
 {
 	bool locked;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+	MUTEX_WARN_ON(lock->magic != lock);
 
 	locked = __mutex_trylock(lock);
 	if (locked)
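
[ One property worth noting, a general preprocessor consequence rather
  than anything specific to this patch: with CONFIG_DEBUG_MUTEXES=n the
  macro expands to nothing, so its argument is never evaluated and must
  not carry side effects:

	/* Fine: a pure predicate, safe in both configurations. */
	MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

	/* Broken: the decrement vanishes when CONFIG_DEBUG_MUTEXES=n.
	 * (hypothetical code, for illustration only) */
	MUTEX_WARN_ON(--waiter_count < 0);
  ]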




* Re: [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff()
  2021-06-30 15:35 ` [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff() Peter Zijlstra
@ 2021-06-30 16:30   ` Waiman Long
  2021-06-30 18:04     ` Peter Zijlstra
  2021-07-08  8:42   ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  1 sibling, 1 reply; 21+ messages in thread
From: Waiman Long @ 2021-06-30 16:30 UTC (permalink / raw)
  To: Peter Zijlstra, mingo, boqun.feng, will; +Cc: linux-kernel, yanfei.xu

On 6/30/21 11:35 AM, Peter Zijlstra wrote:
> Yanfei reported that it is possible to lose HANDOFF when we race with
> mutex_unlock() and end up setting HANDOFF on an unlocked mutex. At
> that point anybody can steal it, losing HANDOFF in the process.
>
> If this happens often enough, we can in fact starve the top waiter.
>
> Solve this by folding the 'set HANDOFF' operation into the trylock
> operation, such that either we acquire the lock, or it gets HANDOFF
> set. This avoids having HANDOFF set on an unlocked mutex.
>
> Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>   kernel/locking/mutex.c |   58 +++++++++++++++++++++++++++++--------------------
>   1 file changed, 35 insertions(+), 23 deletions(-)
>
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -91,10 +91,7 @@ static inline unsigned long __owner_flag
>   	return owner & MUTEX_FLAGS;
>   }
>   
> -/*
> - * Trylock variant that returns the owning task on failure.
> - */
> -static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
> +static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
>   {
>   	unsigned long owner, curr = (unsigned long)current;
>   
> @@ -104,39 +101,56 @@ static inline struct task_struct *__mute
>   		unsigned long task = owner & ~MUTEX_FLAGS;
>   
>   		if (task) {
> -			if (likely(task != curr))
> +			if (flags & MUTEX_FLAG_PICKUP) {
> +				if (task != curr)
> +					break;
> +				flags &= ~MUTEX_FLAG_HANDOFF;

I think you mean "flags &= ~MUTEX_FLAG_PICKUP". Right? :-)

Cheers,
Longman



* Re: [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff()
  2021-06-30 16:30   ` Waiman Long
@ 2021-06-30 18:04     ` Peter Zijlstra
  0 siblings, 0 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-06-30 18:04 UTC (permalink / raw)
  To: Waiman Long; +Cc: mingo, boqun.feng, will, linux-kernel, yanfei.xu

On Wed, Jun 30, 2021 at 12:30:43PM -0400, Waiman Long wrote:
> On 6/30/21 11:35 AM, Peter Zijlstra wrote:
> > Yanfei reported that it is possible to lose HANDOFF when we race with
> > mutex_unlock() and end up setting HANDOFF on an unlocked mutex. At
> > that point anybody can steal it, losing HANDOFF in the process.
> > 
> > If this happens often enough, we can in fact starve the top waiter.
> > 
> > Solve this by folding the 'set HANDOFF' operation into the trylock
> > operation, such that either we acquire the lock, or it gets HANDOFF
> > set. This avoids having HANDOFF set on an unlocked mutex.
> > 
> > Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >   kernel/locking/mutex.c |   58 +++++++++++++++++++++++++++++--------------------
> >   1 file changed, 35 insertions(+), 23 deletions(-)
> > 
> > --- a/kernel/locking/mutex.c
> > +++ b/kernel/locking/mutex.c
> > @@ -91,10 +91,7 @@ static inline unsigned long __owner_flag
> >   	return owner & MUTEX_FLAGS;
> >   }
> > -/*
> > - * Trylock variant that returns the owning task on failure.
> > - */
> > -static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
> > +static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
> >   {
> >   	unsigned long owner, curr = (unsigned long)current;
> > @@ -104,39 +101,56 @@ static inline struct task_struct *__mute
> >   		unsigned long task = owner & ~MUTEX_FLAGS;
> >   		if (task) {
> > -			if (likely(task != curr))
> > +			if (flags & MUTEX_FLAG_PICKUP) {
> > +				if (task != curr)
> > +					break;
> > +				flags &= ~MUTEX_FLAG_HANDOFF;
> 
> I think you mean "flags &= ~MUTEX_FLAG_PICKUP". Right? :-)

Duh, yes. That's what you get trying to write patches with a kid in your
lap.. :-)


* Re: [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
                   ` (3 preceding siblings ...)
  2021-06-30 15:35 ` [RFC][PATCH 4/4] locking/mutex: Add MUTEX_WARN_ON Peter Zijlstra
@ 2021-06-30 19:03 ` Waiman Long
  2021-07-01  2:11 ` Xu, Yanfei
  5 siblings, 0 replies; 21+ messages in thread
From: Waiman Long @ 2021-06-30 19:03 UTC (permalink / raw)
  To: Peter Zijlstra, mingo, boqun.feng, will; +Cc: linux-kernel, yanfei.xu

On 6/30/21 11:35 AM, Peter Zijlstra wrote:
> Hi,
>
> Clean up and fix a few issues reported by Yanfei.
>
> Waiman, I didn't preserve your Reviewed-by, because there are a few extra
> changes, and I've not yet boot-tested them.
>
Other than the typo in patch 3, the rest looks good to me. I have also
boot-tested the patchset and run some mutex stress tests without any problem.

Reviewed-by: Waiman Long <longman@redhat.com>



* Re: [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes
  2021-06-30 15:35 [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Peter Zijlstra
                   ` (4 preceding siblings ...)
  2021-06-30 19:03 ` [RFC][PATCH 0/4] locking/mutex: Some HANDOFF fixes Waiman Long
@ 2021-07-01  2:11 ` Xu, Yanfei
  5 siblings, 0 replies; 21+ messages in thread
From: Xu, Yanfei @ 2021-07-01  2:11 UTC (permalink / raw)
  To: Peter Zijlstra, mingo, longman, boqun.feng, will; +Cc: linux-kernel



On 6/30/21 11:35 PM, Peter Zijlstra wrote:
> 
> Hi,
> 
> Clean up and fix a few issues reported by Yanfei.
> 
> Waiman, I didn't preserve your Reviewed-by, because there are a few extra
> changes, and I've not yet boot-tested them.
> 

Looks perfect to me :)

Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>


Regards,
Yanfei


* Re: [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg()
  2021-06-30 15:35 ` [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg() Peter Zijlstra
@ 2021-07-05 11:59   ` Xu, Yanfei
  2021-07-05 14:00     ` Peter Zijlstra
  2021-07-08  8:42   ` [tip: locking/core] locking/mutex: Use try_cmpxchg() tip-bot2 for Peter Zijlstra
  1 sibling, 1 reply; 21+ messages in thread
From: Xu, Yanfei @ 2021-07-05 11:59 UTC (permalink / raw)
  To: Peter Zijlstra, mingo, longman, boqun.feng, will; +Cc: linux-kernel



On 6/30/21 11:35 PM, Peter Zijlstra wrote:
> For simpler and better code.
> 
> Signed-off-by: Peter Zijlstra (Intel)<peterz@infradead.org>
> ---
>   kernel/locking/mutex.c |   27 ++++++---------------------
>   1 file changed, 6 insertions(+), 21 deletions(-)

Hi Peter,

I read the mutex code today, and it seems there is something wrong with
the patch. Should we consider the race condition below?

 From 4035f50c96e17cbe3febab768b64da5c000e5b76 Mon Sep 17 00:00:00 2001
From: Yanfei Xu <yanfei.xu@windriver.com>
Date: Mon, 5 Jul 2021 17:56:58 +0800
Subject: [PATCH] locking/mutex: fix the endless loop when racing against
  mutex.owner

If a race on mutex.owner happens after we fetch its value,
atomic_long_try_cmpxchg_acquire/release invoked on &mutex.owner will
return false. Then we need to reassign the temporary variable that
saves the mutex.owner value inside the loop, or it will lead to an
endless loop.

Fixes: 9265e48a579d ("locking/mutex: Use try_cmpxchg()")

Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
---
  kernel/locking/mutex.c | 15 ++++++++-------
  1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 5e6a811ac733..ec6b6724c118 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -95,12 +95,12 @@ static inline unsigned long __owner_flags(unsigned long owner)

  static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
  {
-       unsigned long owner, curr = (unsigned long)current;
+       unsigned long flags, owner, task, curr = (unsigned long)current;

-       owner = atomic_long_read(&lock->owner);
         for (;;) { /* must loop, can race against a flag */
-               unsigned long flags = __owner_flags(owner);
-               unsigned long task = owner & ~MUTEX_FLAGS;
+               owner = atomic_long_read(&lock->owner);
+               flags = __owner_flags(owner);
+               task = owner & ~MUTEX_FLAGS;

                 if (task) {
                         if (flags & MUTEX_FLAG_PICKUP) {
@@ -231,10 +231,10 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
   */
  static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
  {
-       unsigned long owner = atomic_long_read(&lock->owner);
+       unsigned long owner, new;

         for (;;) {
-               unsigned long new;
+               owner = atomic_long_read(&lock->owner);

                 MUTEX_WARN_ON(__owner_task(owner) != current);
                 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
@@ -1227,8 +1227,9 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
          * Except when HANDOFF, in that case we must not clear the owner field,
          * but instead set it to the top waiter.
          */
-       owner = atomic_long_read(&lock->owner);
         for (;;) {
+               owner = atomic_long_read(&lock->owner);
+
                 MUTEX_WARN_ON(__owner_task(owner) != current);
                 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

-- 
2.29.2


* Re: [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg()
  2021-07-05 11:59   ` Xu, Yanfei
@ 2021-07-05 14:00     ` Peter Zijlstra
  2021-07-05 14:52       ` Xu, Yanfei
  2021-07-05 15:07       ` [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg() Peter Zijlstra
  0 siblings, 2 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-07-05 14:00 UTC (permalink / raw)
  To: Xu, Yanfei; +Cc: mingo, longman, boqun.feng, will, linux-kernel

On Mon, Jul 05, 2021 at 07:59:12PM +0800, Xu, Yanfei wrote:
> 
> 
> On 6/30/21 11:35 PM, Peter Zijlstra wrote:
> > For simpler and better code.
> > 
> > Signed-off-by: Peter Zijlstra (Intel)<peterz@infradead.org>
> > ---
> >   kernel/locking/mutex.c |   27 ++++++---------------------
> >   1 file changed, 6 insertions(+), 21 deletions(-)
> 
> Hi Peter,
> 
> I read the mutex code today, and it seems there is something wrong with
> the patch. Should we consider the race condition below?
> 
> From 4035f50c96e17cbe3febab768b64da5c000e5b76 Mon Sep 17 00:00:00 2001
> From: Yanfei Xu <yanfei.xu@windriver.com>
> Date: Mon, 5 Jul 2021 17:56:58 +0800
> Subject: [PATCH] locking/mutex: fix the endless loop when racing against
>  mutex.owner
> 
> If a race on mutex.owner happens after we fetch its value,
> atomic_long_try_cmpxchg_acquire/release invoked on &mutex.owner will
> return false. Then we need to reassign the temporary variable that
> saves the mutex.owner value inside the loop, or it will lead to an
> endless loop.

No, when try_cmpxchg() fails it will update oldp. This is the reason old
is now a pointer too.


* Re: [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg()
  2021-07-05 14:00     ` Peter Zijlstra
@ 2021-07-05 14:52       ` Xu, Yanfei
  2021-07-05 15:07       ` [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg() Peter Zijlstra
  1 sibling, 0 replies; 21+ messages in thread
From: Xu, Yanfei @ 2021-07-05 14:52 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: mingo, longman, boqun.feng, will, linux-kernel



On 7/5/21 10:00 PM, Peter Zijlstra wrote:
> 
> On Mon, Jul 05, 2021 at 07:59:12PM +0800, Xu, Yanfei wrote:
>>
>>
>> On 6/30/21 11:35 PM, Peter Zijlstra wrote:
>>> For simpler and better code.
>>>
>>> Signed-off-by: Peter Zijlstra (Intel)<peterz@infradead.org>
>>> ---
>>>    kernel/locking/mutex.c |   27 ++++++---------------------
>>>    1 file changed, 6 insertions(+), 21 deletions(-)
>>
>> Hi Peter,
>>
>> I read the mutex code today, and it seems there is something wrong with
>> the patch. Should we consider the race condition below?
>>
>>  From 4035f50c96e17cbe3febab768b64da5c000e5b76 Mon Sep 17 00:00:00 2001
>> From: Yanfei Xu <yanfei.xu@windriver.com>
>> Date: Mon, 5 Jul 2021 17:56:58 +0800
>> Subject: [PATCH] locking/mutex: fix the endless loop when racing against
>>   mutex.owner
>>
>> If a race on mutex.owner happens after we fetch its value,
>> atomic_long_try_cmpxchg_acquire/release invoked on &mutex.owner will
>> return false. Then we need to reassign the temporary variable that
>> saves the mutex.owner value inside the loop, or it will lead to an
>> endless loop.
> 
> No, when try_cmpxchg() fails it will update oldp. This is the reason old
> is now a pointer too.

Got it. Thanks!

Yanfei
> 


* [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
  2021-07-05 14:00     ` Peter Zijlstra
  2021-07-05 14:52       ` Xu, Yanfei
@ 2021-07-05 15:07       ` Peter Zijlstra
  2021-07-05 15:21         ` Will Deacon
                           ` (2 more replies)
  1 sibling, 3 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-07-05 15:07 UTC (permalink / raw)
  To: Xu, Yanfei; +Cc: mingo, longman, boqun.feng, will, linux-kernel

On Mon, Jul 05, 2021 at 04:00:01PM +0200, Peter Zijlstra wrote:

> No, when try_cmpxchg() fails it will update oldp. This is the reason old
> is now a pointer too.

Since you're not the first person confused by this, does the below
clarify?

---
Subject: Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon Jul  5 17:00:24 CEST 2021

There seems to be a significant amount of confusion around the 'new'
try_cmpxchg(), despite this being more like the C11
atomic_compare_exchange_*() family. Add a few words of clarification
on how cmpxchg() and try_cmpxchg() relate to one another.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 Documentation/atomic_t.txt |   41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -271,3 +271,44 @@ because it would not order the W part of
 			SC *y, t;
 
 is allowed.
+
+
+CMPXHG vs TRY_CMPXCHG
+---------------------
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
+
+Both provide the same functionality, but try_cmpxchg() can lead to more
+compact code. The functions relate like:
+
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
+  {
+    int ret, old = *oldp;
+    ret = atomic_cmpxchg(ptr, old, new);
+    if (ret != old)
+      *oldp = ret;
+    return ret == old;
+  }
+
+and:
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+  {
+    (void)atomic_try_cmpxchg(ptr, &old, new);
+    return old;
+  }
+
+Usage:
+
+  old = atomic_read(&v);			old = atomic_read(&v);
+  for (;;) {					do {
+    new = func(old);				  new = func(old);
+    tmp = atomic_cmpxchg(&v, old, new);		} while (!atomic_try_cmpxchg(&v, &old, new));
+    if (tmp == old)
+      break;
+    old = tmp;
+  }
+
+NB. try_cmpxchg() also generates better code on some platforms (notably x86)
+where the function more closely matches the hardware instruction.
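
[ A concrete instance of the right-hand pattern above, as a
  hypothetical helper rather than anything in the tree:

	/* Add @a to @v unless @v is negative; return false if it was. */
	static inline bool sketch_add_unless_negative(atomic_t *v, int a)
	{
		int old = atomic_read(v);

		do {
			if (old < 0)
				return false;
		} while (!atomic_try_cmpxchg(v, &old, old + a));

		return true;
	}
  ]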


* Re: [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
  2021-07-05 15:07       ` [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg() Peter Zijlstra
@ 2021-07-05 15:21         ` Will Deacon
  2021-07-05 15:25         ` Xu, Yanfei
  2021-07-08  8:42         ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  2 siblings, 0 replies; 21+ messages in thread
From: Will Deacon @ 2021-07-05 15:21 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Xu, Yanfei, mingo, longman, boqun.feng, linux-kernel

On Mon, Jul 05, 2021 at 05:07:41PM +0200, Peter Zijlstra wrote:
> On Mon, Jul 05, 2021 at 04:00:01PM +0200, Peter Zijlstra wrote:
> 
> > No, when try_cmpxchg() fails it will update oldp. This is the reason old
> > is now a pointer too.
> 
> Since you're not the first person confused by this, does the below
> clarify?
> 
> ---
> Subject: Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
> From: Peter Zijlstra <peterz@infradead.org>
> Date: Mon Jul  5 17:00:24 CEST 2021
> 
> There seems to be a significant amount of confusion around the 'new'
> try_cmpxchg(), despite this being more like the C11
> atomic_compare_exchange_*() family. Add a few words of clarification
> on how cmpxchg() and try_cmpxchg() relate to one another.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  Documentation/atomic_t.txt |   41 +++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 41 insertions(+)

With the "CMPXHG" typo fixed:

Acked-by: Will Deacon <will@kernel.org>

Will


* Re: [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
  2021-07-05 15:07       ` [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg() Peter Zijlstra
  2021-07-05 15:21         ` Will Deacon
@ 2021-07-05 15:25         ` Xu, Yanfei
  2021-07-05 17:12           ` Peter Zijlstra
  2021-07-08  8:42         ` [tip: locking/core] " tip-bot2 for Peter Zijlstra
  2 siblings, 1 reply; 21+ messages in thread
From: Xu, Yanfei @ 2021-07-05 15:25 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: mingo, longman, boqun.feng, will, linux-kernel



On 7/5/21 11:07 PM, Peter Zijlstra wrote:
> 
> On Mon, Jul 05, 2021 at 04:00:01PM +0200, Peter Zijlstra wrote:
> 
>> No, when try_cmpxchg() fails it will update oldp. This is the reason old
>> is now a pointer too.
> 
> Since you're not the first person confused by this, does the below
> clarify?
> 
> ---
> Subject: Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
> From: Peter Zijlstra <peterz@infradead.org>
> Date: Mon Jul  5 17:00:24 CEST 2021
> 
> There seems to be a significant amount of confusion around the 'new'
> try_cmpxchg(), despite this being more like the C11
> atomic_compare_exchange_*() family. Add a few words of clarification
> on how cmpxchg() and try_cmpxchg() relate to one another.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>   Documentation/atomic_t.txt |   41 +++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 41 insertions(+)
> 
> --- a/Documentation/atomic_t.txt
> +++ b/Documentation/atomic_t.txt
> @@ -271,3 +271,44 @@ because it would not order the W part of
>                          SC *y, t;
> 
>   is allowed.
> +
> +
> +CMPXHG vs TRY_CMPXCHG

CMPXHG -> CMPXCHG

> +---------------------
> +
> +  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
> +  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
> +
> +Both provide the same functionality, but try_cmpxchg() can lead to more
> +compact code. The functions relate like:
> +
> +  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
> +  {
> +    int ret, old = *oldp;
> +    ret = atomic_cmpxchg(ptr, old, new);
> +    if (ret != old)
> +      *oldp = ret;
> +    return ret == old;
> +  }

I tried to search for some documentation about atomic_try_cmpxchg(), but failed.
Maybe I missed it. With this document of yours, it is clearer now.

> +
> +and:
> +
> +  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
> +  {
> +    (void)atomic_try_cmpxchg(ptr, &old, new);
> +    return old;
> +  }
> +
> +Usage:
> +
> +  old = atomic_read(&v);                       old = atomic_read(&v);
> +  for (;;) {                                   do {
> +    new = func(old);                             new = func(old);
> +    tmp = atomic_cmpxchg(&v, old, new);                } while (!atomic_try_cmpxchg(&v, &old, new));

Some unnecessary spaces before "while".

Thanks,
Yanfei

> +    if (tmp == old)
> +      break;
> +    old = tmp;
> +  }
> +
> +NB. try_cmpxchg() also generates better code on some platforms (notably x86)
> +where the function more closely matches the hardware instruction.
> 


* Re: [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
  2021-07-05 15:25         ` Xu, Yanfei
@ 2021-07-05 17:12           ` Peter Zijlstra
  0 siblings, 0 replies; 21+ messages in thread
From: Peter Zijlstra @ 2021-07-05 17:12 UTC (permalink / raw)
  To: Xu, Yanfei; +Cc: mingo, longman, boqun.feng, will, linux-kernel

On Mon, Jul 05, 2021 at 11:25:29PM +0800, Xu, Yanfei wrote:
> > +CMPXHG vs TRY_CMPXCHG
> 
> CMPXHG -> CMPXCHG

Yeah, already fixed. I spotted it a minute after sending :/

> > +---------------------
> > +
> > +  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
> > +  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
> > +
> > +Both provide the same functionality, but try_cmpxchg() can lead to more
> > +compact code. The functions relate like:
> > +
> > +  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
> > +  {
> > +    int ret, old = *oldp;
> > +    ret = atomic_cmpxchg(ptr, old, new);
> > +    if (ret != old)
> > +      *oldp = ret;
> > +    return ret == old;
> > +  }
> 
> I tried to search for some documentation about atomic_try_cmpxchg(), but failed.
> Maybe I missed it. With this document of yours, it is clearer now.

OK, good, thanks!

> > +
> > +and:
> > +
> > +  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
> > +  {
> > +    (void)atomic_try_cmpxchg(ptr, &old, new);
> > +    return old;
> > +  }
> > +
> > +Usage:
> > +
> > +  old = atomic_read(&v);                       old = atomic_read(&v);
> > +  for (;;) {                                   do {
> > +    new = func(old);                             new = func(old);
> > +    tmp = atomic_cmpxchg(&v, old, new);                } while (!atomic_try_cmpxchg(&v, &old, new));
> 
> Some unnecessary spaces before "while".

That's due to the diff prepending the line with "+ " which offsets the
tabstop. If you apply the patch the actual document is fine.


* [tip: locking/core] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()
  2021-07-05 15:07       ` [PATCH] Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg() Peter Zijlstra
  2021-07-05 15:21         ` Will Deacon
  2021-07-05 15:25         ` Xu, Yanfei
@ 2021-07-08  8:42         ` tip-bot2 for Peter Zijlstra
  2 siblings, 0 replies; 21+ messages in thread
From: tip-bot2 for Peter Zijlstra @ 2021-07-08  8:42 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Peter Zijlstra (Intel), Will Deacon, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     d1bbfd0c7c9f985e57795a7e0cefc209ebf689c0
Gitweb:        https://git.kernel.org/tip/d1bbfd0c7c9f985e57795a7e0cefc209ebf689c0
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Mon, 05 Jul 2021 17:00:24 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Jul 2021 13:53:25 +02:00

Documentation/atomic_t: Document cmpxchg() vs try_cmpxchg()

There seems to be a significant amount of confusion around the new
try_cmpxchg(), despite this being more like the C11
atomic_compare_exchange_*() family. Add a few words of clarification
on how cmpxchg() and try_cmpxchg() relate to one another.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/YOMgPeMOmmiK3tXO@hirez.programming.kicks-ass.net
---
 Documentation/atomic_t.txt | 41 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+)

diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
index 0f1fded..a9c1e2b 100644
--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -271,3 +271,44 @@ WRITE_ONCE.  Thus:
 			SC *y, t;
 
 is allowed.
+
+
+CMPXCHG vs TRY_CMPXCHG
+----------------------
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
+
+Both provide the same functionality, but try_cmpxchg() can lead to more
+compact code. The functions relate like:
+
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
+  {
+    int ret, old = *oldp;
+    ret = atomic_cmpxchg(ptr, old, new);
+    if (ret != old)
+      *oldp = ret;
+    return ret == old;
+  }
+
+and:
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+  {
+    (void)atomic_try_cmpxchg(ptr, &old, new);
+    return old;
+  }
+
+Usage:
+
+  old = atomic_read(&v);			old = atomic_read(&v);
+  for (;;) {					do {
+    new = func(old);				  new = func(old);
+    tmp = atomic_cmpxchg(&v, old, new);		} while (!atomic_try_cmpxchg(&v, &old, new));
+    if (tmp == old)
+      break;
+    old = tmp;
+  }
+
+NB. try_cmpxchg() also generates better code on some platforms (notably x86)
+where the function more closely matches the hardware instruction.


* [tip: locking/core] locking/mutex: Add MUTEX_WARN_ON
  2021-06-30 15:35 ` [RFC][PATCH 4/4] locking/mutex: Add MUTEX_WARN_ON Peter Zijlstra
@ 2021-07-08  8:42   ` tip-bot2 for Peter Zijlstra
  0 siblings, 0 replies; 21+ messages in thread
From: tip-bot2 for Peter Zijlstra @ 2021-07-08  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Peter Zijlstra (Intel), Waiman Long, Yanfei Xu, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     e6b4457b05f36bb9e371f29ab1dd2d97272a1540
Gitweb:        https://git.kernel.org/tip/e6b4457b05f36bb9e371f29ab1dd2d97272a1540
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Wed, 30 Jun 2021 17:35:20 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Jul 2021 13:53:25 +02:00

locking/mutex: Add MUTEX_WARN_ON

Clean up some #ifdef'fery.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154115.020298650@infradead.org
---
 kernel/locking/mutex.c | 30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index b81ec97..633bf0d 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -32,8 +32,10 @@
 
 #ifdef CONFIG_DEBUG_MUTEXES
 # include "mutex-debug.h"
+# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
 #else
 # include "mutex.h"
+# define MUTEX_WARN_ON(cond)
 #endif
 
 void
@@ -113,9 +115,7 @@ static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, boo
 				break;
 			}
 		} else {
-#ifdef CONFIG_DEBUG_MUTEXES
-			DEBUG_LOCKS_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
-#endif
+			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 			task = curr;
 		}
 
@@ -218,10 +218,8 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 	for (;;) {
 		unsigned long new;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+		MUTEX_WARN_ON(__owner_task(owner) != current);
+		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
 		new = (owner & MUTEX_FLAG_WAITERS);
 		new |= (unsigned long)task;
@@ -754,9 +752,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 	 * into 'unlocked' state:
 	 */
 	if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
+		MUTEX_WARN_ON(!lock->ctx->acquired);
 		if (lock->ctx->acquired > 0)
 			lock->ctx->acquired--;
 		lock->ctx = NULL;
@@ -931,9 +927,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	might_sleep();
 
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+	MUTEX_WARN_ON(lock->magic != lock);
 
 	ww = container_of(lock, struct ww_mutex, base);
 	if (ww_ctx) {
@@ -1227,10 +1221,8 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	 */
 	owner = atomic_long_read(&lock->owner);
 	for (;;) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+		MUTEX_WARN_ON(__owner_task(owner) != current);
+		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
 		if (owner & MUTEX_FLAG_HANDOFF)
 			break;
@@ -1396,9 +1388,7 @@ int __sched mutex_trylock(struct mutex *lock)
 {
 	bool locked;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+	MUTEX_WARN_ON(lock->magic != lock);
 
 	locked = __mutex_trylock(lock);
 	if (locked)


* [tip: locking/core] locking/mutex: Introduce __mutex_trylock_or_handoff()
  2021-06-30 15:35 ` [RFC][PATCH 3/4] locking/mutex: Introduce __mutex_trylock_or_handoff() Peter Zijlstra
  2021-06-30 16:30   ` Waiman Long
@ 2021-07-08  8:42   ` tip-bot2 for Peter Zijlstra
  1 sibling, 0 replies; 21+ messages in thread
From: tip-bot2 for Peter Zijlstra @ 2021-07-08  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Yanfei Xu, Peter Zijlstra (Intel), Waiman Long, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     ad90880dc9625682a58897cba2ecff657a2aa60b
Gitweb:        https://git.kernel.org/tip/ad90880dc9625682a58897cba2ecff657a2aa60b
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Wed, 30 Jun 2021 17:35:19 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Jul 2021 13:53:25 +02:00

locking/mutex: Introduce __mutex_trylock_or_handoff()

Yanfei reported that it is possible to lose HANDOFF when we race with
mutex_unlock() and end up setting HANDOFF on an unlocked mutex. At
that point anybody can steal it, losing HANDOFF in the process.

If this happens often enough, we can in fact starve the top waiter.

Solve this by folding the 'set HANDOFF' operation into the trylock
operation, such that either we acquire the lock, or it gets HANDOFF
set. This avoids having HANDOFF set on an unlocked mutex.

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154114.958507900@infradead.org
---
 kernel/locking/mutex.c | 60 ++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 24 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 8c3d499..b81ec97 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -91,10 +91,7 @@ static inline unsigned long __owner_flags(unsigned long owner)
 	return owner & MUTEX_FLAGS;
 }
 
-/*
- * Trylock variant that returns the owning task on failure.
- */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
 	unsigned long owner, curr = (unsigned long)current;
 
@@ -104,39 +101,48 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr))
+			if (flags & MUTEX_FLAG_PICKUP) {
+				if (task != curr)
+					break;
+				flags &= ~MUTEX_FLAG_PICKUP;
+			} else if (handoff) {
+				if (flags & MUTEX_FLAG_HANDOFF)
+					break;
+				flags |= MUTEX_FLAG_HANDOFF;
+			} else {
 				break;
-
-			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
-				break;
-
-			flags &= ~MUTEX_FLAG_PICKUP;
+			}
 		} else {
 #ifdef CONFIG_DEBUG_MUTEXES
-			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
+			DEBUG_LOCKS_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 #endif
+			task = curr;
 		}
 
-		/*
-		 * We set the HANDOFF bit, we must make sure it doesn't live
-		 * past the point where we acquire it. This would be possible
-		 * if we (accidentally) set the bit on an unlocked mutex.
-		 */
-		flags &= ~MUTEX_FLAG_HANDOFF;
-
-		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
-			return NULL;
+		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
+			if (task == curr)
+				return NULL;
+			break;
+		}
 	}
 
 	return __owner_task(owner);
 }
 
 /*
+ * Trylock or set HANDOFF
+ */
+static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
+{
+	return !__mutex_trylock_common(lock, handoff);
+}
+
+/*
  * Actual trylock that will work on any unlocked state.
  */
 static inline bool __mutex_trylock(struct mutex *lock)
 {
-	return !__mutex_trylock_or_owner(lock);
+	return !__mutex_trylock_common(lock, false);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -479,6 +485,14 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
+/*
+ * Trylock variant that returns the owning task on failure.
+ */
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+{
+	return __mutex_trylock_common(lock, false);
+}
+
 static inline
 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 			    struct mutex_waiter *waiter)
@@ -1018,8 +1032,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		schedule_preempt_disabled();
 
 		first = __mutex_waiter_is_first(lock, &waiter);
-		if (first)
-			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*
@@ -1027,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * state back to RUNNING and fall through the next schedule(),
 		 * or we must see its unlock and acquire.
 		 */
-		if (__mutex_trylock(lock) ||
+		if (__mutex_trylock_or_handoff(lock, first) ||
 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 


* [tip: locking/core] locking/mutex: Fix HANDOFF condition
  2021-06-30 15:35 ` [RFC][PATCH 2/4] locking/mutex: Fix HANDOFF condition Peter Zijlstra
@ 2021-07-08  8:42   ` tip-bot2 for Peter Zijlstra
  0 siblings, 0 replies; 21+ messages in thread
From: tip-bot2 for Peter Zijlstra @ 2021-07-08  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Yanfei Xu, Peter Zijlstra (Intel), Waiman Long, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     048661a1f963e9517630f080687d48af79ed784c
Gitweb:        https://git.kernel.org/tip/048661a1f963e9517630f080687d48af79ed784c
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Wed, 30 Jun 2021 17:35:18 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Jul 2021 13:53:24 +02:00

locking/mutex: Fix HANDOFF condition

Yanfei reported that setting HANDOFF should not depend on recomputing
@first, only on @first state. Which would then give:

  if (ww_ctx || !first)
    first = __mutex_waiter_is_first(lock, &waiter);
  if (first)
    __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

But because 'ww_ctx || !first' is basically 'always' and the test for
first is relatively cheap, omit that first branch entirely.

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154114.896786297@infradead.org
---
 kernel/locking/mutex.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cab7163..8c3d499 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -909,7 +909,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct mutex_waiter waiter;
-	bool first = false;
 	struct ww_mutex *ww;
 	int ret;
 
@@ -988,6 +987,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	set_current_state(state);
 	for (;;) {
+		bool first;
+
 		/*
 		 * Once we hold wait_lock, we're serialized against
 		 * mutex_unlock() handing the lock off to us, do a trylock
@@ -1016,15 +1017,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		spin_unlock(&lock->wait_lock);
 		schedule_preempt_disabled();
 
-		/*
-		 * ww_mutex needs to always recheck its position since its waiter
-		 * list is not FIFO ordered.
-		 */
-		if (ww_ctx || !first) {
-			first = __mutex_waiter_is_first(lock, &waiter);
-			if (first)
-				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
-		}
+		first = __mutex_waiter_is_first(lock, &waiter);
+		if (first)
+			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*


* [tip: locking/core] locking/mutex: Use try_cmpxchg()
  2021-06-30 15:35 ` [RFC][PATCH 1/4] locking/mutex: Use try_cmpxchg() Peter Zijlstra
  2021-07-05 11:59   ` Xu, Yanfei
@ 2021-07-08  8:42   ` tip-bot2 for Peter Zijlstra
  1 sibling, 0 replies; 21+ messages in thread
From: tip-bot2 for Peter Zijlstra @ 2021-07-08  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Peter Zijlstra (Intel), Waiman Long, Yanfei Xu, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     ab4e4d9f79b2c95ef268985d2a9625a03a73c49a
Gitweb:        https://git.kernel.org/tip/ab4e4d9f79b2c95ef268985d2a9625a03a73c49a
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Wed, 30 Jun 2021 17:35:17 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Jul 2021 13:53:24 +02:00

locking/mutex: Use try_cmpxchg()

For simpler and better code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154114.834438545@infradead.org
---
 kernel/locking/mutex.c | 27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112..cab7163 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -100,7 +100,7 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 
 	owner = atomic_long_read(&lock->owner);
 	for (;;) { /* must loop, can race against a flag */
-		unsigned long old, flags = __owner_flags(owner);
+		unsigned long flags = __owner_flags(owner);
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
@@ -124,11 +124,8 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 		 */
 		flags &= ~MUTEX_FLAG_HANDOFF;
 
-		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
-		if (old == owner)
+		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
 			return NULL;
-
-		owner = old;
 	}
 
 	return __owner_task(owner);
@@ -168,10 +165,7 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
-	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-		return true;
-
-	return false;
+	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 }
 #endif
 
@@ -216,7 +210,7 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 	unsigned long owner = atomic_long_read(&lock->owner);
 
 	for (;;) {
-		unsigned long old, new;
+		unsigned long new;
 
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
@@ -228,11 +222,8 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 		if (task)
 			new |= MUTEX_FLAG_PICKUP;
 
-		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
-		if (old == owner)
+		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
 			break;
-
-		owner = old;
 	}
 }
 
@@ -1229,8 +1220,6 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	 */
 	owner = atomic_long_read(&lock->owner);
 	for (;;) {
-		unsigned long old;
-
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
@@ -1239,16 +1228,12 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 		if (owner & MUTEX_FLAG_HANDOFF)
 			break;
 
-		old = atomic_long_cmpxchg_release(&lock->owner, owner,
-						  __owner_flags(owner));
-		if (old == owner) {
+		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
 			if (owner & MUTEX_FLAG_WAITERS)
 				break;
 
 			return;
 		}
-
-		owner = old;
 	}
 
 	spin_lock(&lock->wait_lock);

