* [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
@ 2016-08-10 18:44 Jason Low
  2016-08-10 20:00 ` kbuild test robot
                   ` (7 more replies)
  0 siblings, 8 replies; 16+ messages in thread
From: Jason Low @ 2016-08-10 18:44 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, imre.deak
  Cc: linux-kernel, Jason Low, Jason Low, Waiman Long, Davidlohr Bueso,
	Tim Chen, terry.rudd, Paul E. McKenney

Imre reported an issue where threads are getting starved when trying
to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
sleeping on a mutex because other threads can continually steal the lock
in the fastpath and/or through optimistic spinning.

Waiman has developed patches that allow waiters to return to optimistic
spinning, thus reducing the probability that starvation occurs. However,
Imre still sees this starvation problem in the workloads when optimistic
spinning is disabled.

This patch adds an additional boolean to the mutex that gets used in
the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
whether or not other threads need to yield to a waiter and gets set
when a waiter spends too much time waiting for the mutex. The threshold
is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
other threads must yield to the top waiter. The flag gets cleared
immediately after the top waiter acquires the mutex.

This prevents waiters from getting starved without sacrificing much
performance, as lock stealing is still allowed and only
temporarily disabled when it is detected that a waiter has been waiting
for too long.

Reported-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Jason Low <jason.low2@hpe.com>
---
v1->v2:
- Addressed Waiman's suggestion that the yield_to_waiter flag is
  only needed in the CONFIG_SMP case.

- Make sure to only clear the flag if the thread is the top waiter.

- Refactor code to clear flag into an inline function.

- Rename the 'loops' variable to 'wakeups'.

 include/linux/mutex.h  |  2 ++
 kernel/locking/mutex.c | 83 +++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 74 insertions(+), 11 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531..5643a233 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -57,6 +57,8 @@ struct mutex {
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#elif defined(CONFIG_SMP)
+	bool yield_to_waiter; /* Prevent starvation when spinning disabled */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	void			*magic;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..faf31a0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -55,6 +55,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	osq_lock_init(&lock->osq);
+#elif defined(CONFIG_SMP)
+	lock->yield_to_waiter = false;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -71,6 +73,9 @@ EXPORT_SYMBOL(__mutex_init);
  */
 __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
+
+static inline bool need_yield_to_waiter(struct mutex *lock);
+
 /**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
@@ -99,7 +104,10 @@ void __sched mutex_lock(struct mutex *lock)
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
 	 */
-	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	if (!need_yield_to_waiter(lock))
+		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	else
+		__mutex_lock_slowpath(&lock->count);
 	mutex_set_owner(lock);
 }
 
@@ -398,12 +406,51 @@ done:
 
 	return false;
 }
+
+static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
+{
+	return;
+}
+
+static inline void clear_yield_to_waiter(struct mutex *lock)
+{
+	return;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+	return false;
+}
+
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
 				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	return false;
 }
+
+#define MUTEX_WAKEUP_THRESHOLD 16
+
+static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
+{
+	*wakeups += 1;
+
+	if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
+		return;
+
+	if (lock->yield_to_waiter != true)
+		lock->yield_to_waiter = true;
+}
+
+static inline void clear_yield_to_waiter(struct mutex *lock)
+{
+	lock->yield_to_waiter = false;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+	return lock->yield_to_waiter;
+}
 #endif
 
 __visible __used noinline
@@ -510,6 +557,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	int ret;
+	int wakeups = 0;
 
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
@@ -532,7 +580,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * Once more, try to acquire the lock. Only try-lock the mutex if
 	 * it is unlocked to reduce unnecessary xchg() operations.
 	 */
-	if (!mutex_is_locked(lock) &&
+	if (!need_yield_to_waiter(lock) && !mutex_is_locked(lock) &&
 	    (atomic_xchg_acquire(&lock->count, 0) == 1))
 		goto skip_wait;
 
@@ -556,8 +604,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * other waiters. We only attempt the xchg if the count is
 		 * non-negative in order to avoid unnecessary xchg operations:
 		 */
-		if (atomic_read(&lock->count) >= 0 &&
+		if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
+		    atomic_read(&lock->count) >= 0 &&
 		    (atomic_xchg_acquire(&lock->count, -1) == 1))
+			if (wakeups > 1)
+				clear_yield_to_waiter(lock);
+
 			break;
 
 		/*
@@ -581,6 +633,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
+		do_yield_to_waiter(lock, &wakeups);
 	}
 	__set_task_state(task, TASK_RUNNING);
 
@@ -789,10 +842,13 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
-	ret =  __mutex_fastpath_lock_retval(&lock->count);
+
+	if (!need_yield_to_waiter(lock))
+		ret =  __mutex_fastpath_lock_retval(&lock->count);
+
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
 		return 0;
@@ -804,10 +860,13 @@ EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval(&lock->count);
+
+	if (!need_yield_to_waiter(lock))
+		ret = __mutex_fastpath_lock_retval(&lock->count);
+
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
 		return 0;
@@ -917,11 +976,12 @@ EXPORT_SYMBOL(mutex_trylock);
 int __sched
 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
 
-	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+	if (!need_yield_to_waiter(lock))
+		ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
 	if (likely(!ret)) {
 		ww_mutex_set_context_fastpath(lock, ctx);
@@ -935,11 +995,12 @@ EXPORT_SYMBOL(__ww_mutex_lock);
 int __sched
 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
 
-	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+	if (!need_yield_to_waiter(lock))
+		ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
 	if (likely(!ret)) {
 		ww_mutex_set_context_fastpath(lock, ctx);
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
@ 2016-08-10 20:00 ` kbuild test robot
  2016-08-10 20:01 ` kbuild test robot
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: kbuild test robot @ 2016-08-10 20:00 UTC (permalink / raw)
  To: Jason Low
  Cc: kbuild-all, Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel,
	Jason Low, Jason Low, Waiman Long, Davidlohr Bueso, Tim Chen,
	terry.rudd, Paul E. McKenney

[-- Attachment #1: Type: text/plain, Size: 3475 bytes --]

Hi Jason,

[auto build test WARNING on tip/locking/core]
[also build test WARNING on v4.8-rc1 next-20160809]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jason-Low/locking-mutex-Prevent-lock-starvation-when-spinning-is-enabled/20160811-034327
config: m68k-allyesconfig (attached as .config)
compiler: m68k-linux-gcc (GCC) 4.9.0
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=m68k 

All warnings (new ones prefixed by >>):

   kernel/locking/mutex.c: In function 'do_yield_to_waiter':
   kernel/locking/mutex.c:441:10: error: 'struct mutex' has no member named 'yield_to_waiter'
     if (lock->yield_to_waiter != true)
             ^
   kernel/locking/mutex.c:442:7: error: 'struct mutex' has no member named 'yield_to_waiter'
      lock->yield_to_waiter = true;
          ^
   kernel/locking/mutex.c: In function 'clear_yield_to_waiter':
   kernel/locking/mutex.c:447:6: error: 'struct mutex' has no member named 'yield_to_waiter'
     lock->yield_to_waiter = false;
         ^
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
   kernel/locking/mutex.c:452:13: error: 'struct mutex' has no member named 'yield_to_waiter'
     return lock->yield_to_waiter;
                ^
   kernel/locking/mutex.c: In function '__ww_mutex_lock':
>> kernel/locking/mutex.c:983:7: warning: passing argument 1 of 'need_yield_to_waiter' from incompatible pointer type
     if (!need_yield_to_waiter(lock))
          ^
   kernel/locking/mutex.c:450:20: note: expected 'struct mutex *' but argument is of type 'struct ww_mutex *'
    static inline bool need_yield_to_waiter(struct mutex *lock)
                       ^
   kernel/locking/mutex.c: In function '__ww_mutex_lock_interruptible':
   kernel/locking/mutex.c:1002:7: warning: passing argument 1 of 'need_yield_to_waiter' from incompatible pointer type
     if (!need_yield_to_waiter(lock))
          ^
   kernel/locking/mutex.c:450:20: note: expected 'struct mutex *' but argument is of type 'struct ww_mutex *'
    static inline bool need_yield_to_waiter(struct mutex *lock)
                       ^
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
   kernel/locking/mutex.c:453:1: warning: control reaches end of non-void function [-Wreturn-type]
    }
    ^

vim +/need_yield_to_waiter +983 kernel/locking/mutex.c

   967		ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
   968		if (ret)
   969			mutex_set_owner(lock);
   970	
   971		return ret;
   972	}
   973	EXPORT_SYMBOL(mutex_trylock);
   974	
   975	#ifndef CONFIG_DEBUG_LOCK_ALLOC
   976	int __sched
   977	__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
   978	{
   979		int ret = 1;
   980	
   981		might_sleep();
   982	
 > 983		if (!need_yield_to_waiter(lock))
   984			ret = __mutex_fastpath_lock_retval(&lock->base.count);
   985	
   986		if (likely(!ret)) {
   987			ww_mutex_set_context_fastpath(lock, ctx);
   988			mutex_set_owner(&lock->base);
   989		} else
   990			ret = __ww_mutex_lock_slowpath(lock, ctx);
   991		return ret;

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 37707 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
  2016-08-10 20:00 ` kbuild test robot
@ 2016-08-10 20:01 ` kbuild test robot
  2016-08-16 20:19   ` Jason Low
  2016-08-10 22:07 ` Jason Low
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: kbuild test robot @ 2016-08-10 20:01 UTC (permalink / raw)
  To: Jason Low
  Cc: kbuild-all, Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel,
	Jason Low, Jason Low, Waiman Long, Davidlohr Bueso, Tim Chen,
	terry.rudd, Paul E. McKenney

[-- Attachment #1: Type: text/plain, Size: 8253 bytes --]

Hi Jason,

[auto build test ERROR on tip/locking/core]
[also build test ERROR on v4.8-rc1 next-20160809]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jason-Low/locking-mutex-Prevent-lock-starvation-when-spinning-is-enabled/20160811-034327
config: x86_64-randconfig-x013-201632 (attached as .config)
compiler: gcc-6 (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All error/warnings (new ones prefixed by >>):

   kernel/locking/mutex.c: In function 'do_yield_to_waiter':
>> kernel/locking/mutex.c:441:10: error: 'struct mutex' has no member named 'yield_to_waiter'
     if (lock->yield_to_waiter != true)
             ^~
   kernel/locking/mutex.c:442:7: error: 'struct mutex' has no member named 'yield_to_waiter'
      lock->yield_to_waiter = true;
          ^~
   kernel/locking/mutex.c: In function 'clear_yield_to_waiter':
   kernel/locking/mutex.c:447:6: error: 'struct mutex' has no member named 'yield_to_waiter'
     lock->yield_to_waiter = false;
         ^~
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
   kernel/locking/mutex.c:452:13: error: 'struct mutex' has no member named 'yield_to_waiter'
     return lock->yield_to_waiter;
                ^~
   kernel/locking/mutex.c: In function '__mutex_lock_common':
>> kernel/locking/mutex.c:607:3: warning: this 'if' clause does not guard... [-Wmisleading-indentation]
      if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
      ^~
   kernel/locking/mutex.c:613:4: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the 'if'
       break;
       ^~~~~
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
>> kernel/locking/mutex.c:453:1: warning: control reaches end of non-void function [-Wreturn-type]
    }
    ^

vim +441 kernel/locking/mutex.c

   435	{
   436		*wakeups += 1;
   437	
   438		if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
   439			return;
   440	
 > 441		if (lock->yield_to_waiter != true)
   442			lock->yield_to_waiter = true;
   443	}
   444	
   445	static inline void clear_yield_to_waiter(struct mutex *lock)
   446	{
 > 447		lock->yield_to_waiter = false;
   448	}
   449	
   450	static inline bool need_yield_to_waiter(struct mutex *lock)
   451	{
   452		return lock->yield_to_waiter;
 > 453	}
   454	#endif
   455	
   456	__visible __used noinline
   457	void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
   458	
   459	/**
   460	 * mutex_unlock - release the mutex
   461	 * @lock: the mutex to be released
   462	 *
   463	 * Unlock a mutex that has been locked by this task previously.
   464	 *
   465	 * This function must not be used in interrupt context. Unlocking
   466	 * of a not locked mutex is not allowed.
   467	 *
   468	 * This function is similar to (but not equivalent to) up().
   469	 */
   470	void __sched mutex_unlock(struct mutex *lock)
   471	{
   472		/*
   473		 * The unlocking fastpath is the 0->1 transition from 'locked'
   474		 * into 'unlocked' state:
   475		 */
   476	#ifndef CONFIG_DEBUG_MUTEXES
   477		/*
   478		 * When debugging is enabled we must not clear the owner before time,
   479		 * the slow path will always be taken, and that clears the owner field
   480		 * after verifying that it was indeed current.
   481		 */
   482		mutex_clear_owner(lock);
   483	#endif
   484		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
   485	}
   486	
   487	EXPORT_SYMBOL(mutex_unlock);
   488	
   489	/**
   490	 * ww_mutex_unlock - release the w/w mutex
   491	 * @lock: the mutex to be released
   492	 *
   493	 * Unlock a mutex that has been locked by this task previously with any of the
   494	 * ww_mutex_lock* functions (with or without an acquire context). It is
   495	 * forbidden to release the locks after releasing the acquire context.
   496	 *
   497	 * This function must not be used in interrupt context. Unlocking
   498	 * of a unlocked mutex is not allowed.
   499	 */
   500	void __sched ww_mutex_unlock(struct ww_mutex *lock)
   501	{
   502		/*
   503		 * The unlocking fastpath is the 0->1 transition from 'locked'
   504		 * into 'unlocked' state:
   505		 */
   506		if (lock->ctx) {
   507	#ifdef CONFIG_DEBUG_MUTEXES
   508			DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
   509	#endif
   510			if (lock->ctx->acquired > 0)
   511				lock->ctx->acquired--;
   512			lock->ctx = NULL;
   513		}
   514	
   515	#ifndef CONFIG_DEBUG_MUTEXES
   516		/*
   517		 * When debugging is enabled we must not clear the owner before time,
   518		 * the slow path will always be taken, and that clears the owner field
   519		 * after verifying that it was indeed current.
   520		 */
   521		mutex_clear_owner(&lock->base);
   522	#endif
   523		__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
   524	}
   525	EXPORT_SYMBOL(ww_mutex_unlock);
   526	
   527	static inline int __sched
   528	__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
   529	{
   530		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
   531		struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
   532	
   533		if (!hold_ctx)
   534			return 0;
   535	
   536		if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
   537		    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
   538	#ifdef CONFIG_DEBUG_MUTEXES
   539			DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
   540			ctx->contending_lock = ww;
   541	#endif
   542			return -EDEADLK;
   543		}
   544	
   545		return 0;
   546	}
   547	
   548	/*
   549	 * Lock a mutex (possibly interruptible), slowpath:
   550	 */
   551	static __always_inline int __sched
   552	__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
   553			    struct lockdep_map *nest_lock, unsigned long ip,
   554			    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
   555	{
   556		struct task_struct *task = current;
   557		struct mutex_waiter waiter;
   558		unsigned long flags;
   559		int ret;
   560		int wakeups = 0;
   561	
   562		if (use_ww_ctx) {
   563			struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
   564			if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
   565				return -EALREADY;
   566		}
   567	
   568		preempt_disable();
   569		mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
   570	
   571		if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
   572			/* got the lock, yay! */
   573			preempt_enable();
   574			return 0;
   575		}
   576	
   577		spin_lock_mutex(&lock->wait_lock, flags);
   578	
   579		/*
   580		 * Once more, try to acquire the lock. Only try-lock the mutex if
   581		 * it is unlocked to reduce unnecessary xchg() operations.
   582		 */
   583		if (!need_yield_to_waiter(lock) && !mutex_is_locked(lock) &&
   584		    (atomic_xchg_acquire(&lock->count, 0) == 1))
   585			goto skip_wait;
   586	
   587		debug_mutex_lock_common(lock, &waiter);
   588		debug_mutex_add_waiter(lock, &waiter, task);
   589	
   590		/* add waiting tasks to the end of the waitqueue (FIFO): */
   591		list_add_tail(&waiter.list, &lock->wait_list);
   592		waiter.task = task;
   593	
   594		lock_contended(&lock->dep_map, ip);
   595	
   596		for (;;) {
   597			/*
   598			 * Lets try to take the lock again - this is needed even if
   599			 * we get here for the first time (shortly after failing to
   600			 * acquire the lock), to make sure that we get a wakeup once
   601			 * it's unlocked. Later on, if we sleep, this is the
   602			 * operation that gives us the lock. We xchg it to -1, so
   603			 * that when we release the lock, we properly wake up the
   604			 * other waiters. We only attempt the xchg if the count is
   605			 * non-negative in order to avoid unnecessary xchg operations:
   606			 */
 > 607			if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
   608			    atomic_read(&lock->count) >= 0 &&
   609			    (atomic_xchg_acquire(&lock->count, -1) == 1))
   610				if (wakeups > 1)

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 25495 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
  2016-08-10 20:00 ` kbuild test robot
  2016-08-10 20:01 ` kbuild test robot
@ 2016-08-10 22:07 ` Jason Low
  2016-08-11  2:30 ` Jason Low
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Jason Low @ 2016-08-10 22:07 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: jason.low2, Ingo Molnar, imre.deak, linux-kernel, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Wed, 2016-08-10 at 11:44 -0700, Jason Low wrote:
> Imre reported an issue where threads are getting starved when trying
> to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
> sleeping on a mutex because other threads can continually steal the lock
> in the fastpath and/or through optimistic spinning.
> 
> Waiman has developed patches that allow waiters to return to optimistic
> spinning, thus reducing the probability that starvation occurs. However,
> Imre still sees this starvation problem in the workloads when optimistic
> spinning is disabled.
> 
> This patch adds an additional boolean to the mutex that gets used in
> the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
> whether or not other threads need to yield to a waiter and gets set
> when a waiter spends too much time waiting for the mutex. The threshold
> is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
> other threads must yield to the top waiter. The flag gets cleared
> immediately after the top waiter acquires the mutex.

Just noticed that the patch title mentions "when spinning is enabled".

The title should really be:

"locking/mutex: Prevent lock starvation when spinning is disabled"

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
                   ` (2 preceding siblings ...)
  2016-08-10 22:07 ` Jason Low
@ 2016-08-11  2:30 ` Jason Low
  2016-08-11 15:40 ` Waiman Long
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Jason Low @ 2016-08-11  2:30 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: jason.low2, Ingo Molnar, imre.deak, linux-kernel, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Wed, 2016-08-10 at 11:44 -0700, Jason Low wrote:
> @@ -917,11 +976,12 @@ EXPORT_SYMBOL(mutex_trylock);
>  int __sched
>  __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
>  {
> -	int ret;
> +	int ret = 1;
>  
>  	might_sleep();
>  
> -	ret = __mutex_fastpath_lock_retval(&lock->base.count);
> +	if (!need_yield_to_waiter(lock))
> +		ret = __mutex_fastpath_lock_retval(&lock->base.count);
>  
>  	if (likely(!ret)) {
>  		ww_mutex_set_context_fastpath(lock, ctx);
> @@ -935,11 +995,12 @@ EXPORT_SYMBOL(__ww_mutex_lock);
>  int __sched
>  __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
>  {
> -	int ret;
> +	int ret = 1;
>  
>  	might_sleep();
>  
> -	ret = __mutex_fastpath_lock_retval(&lock->base.count);
> +	if (!need_yield_to_waiter(lock))

And we would need to pass &lock->base instead of lock since lock is
struct ww_mutex * here.
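
In other words, roughly this (just a sketch of the intended fix):

	if (!need_yield_to_waiter(&lock->base))
		ret = __mutex_fastpath_lock_retval(&lock->base.count);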

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
                   ` (3 preceding siblings ...)
  2016-08-11  2:30 ` Jason Low
@ 2016-08-11 15:40 ` Waiman Long
  2016-08-16 19:44   ` Jason Low
  2016-08-17  1:41 ` Wanpeng Li
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Waiman Long @ 2016-08-11 15:40 UTC (permalink / raw)
  To: Jason Low
  Cc: Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel, Jason Low,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On 08/10/2016 02:44 PM, Jason Low wrote:
> Imre reported an issue where threads are getting starved when trying
> to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
> sleeping on a mutex because other threads can continually steal the lock
> in the fastpath and/or through optimistic spinning.
>
> Waiman has developed patches that allow waiters to return to optimistic
> spinning, thus reducing the probability that starvation occurs. However,
> Imre still sees this starvation problem in the workloads when optimistic
> spinning is disabled.
>
> This patch adds an additional boolean to the mutex that gets used in
> the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
> whether or not other threads need to yield to a waiter and gets set
> when a waiter spends too much time waiting for the mutex. The threshold
> is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
> other threads must yield to the top waiter. The flag gets cleared
> immediately after the top waiter acquires the mutex.
>
> This prevents waiters from getting starved without sacrificing much
> performance, as lock stealing is still allowed and only
> temporarily disabled when it is detected that a waiter has been waiting
> for too long.
>
> Reported-by: Imre Deak <imre.deak@intel.com>
> Signed-off-by: Jason Low <jason.low2@hpe.com>
> ---
> v1->v2:
> - Addressed Waiman's suggestions of needing the yield_to_waiter
>    flag only in the CONFIG_SMP case.
>
> - Make sure to only clear the flag if the thread is the top waiter.
>
> - Refactor code to clear flag into an inline function.
>
> - Rename 'loops' variable name to 'wakeups'.
>
>   include/linux/mutex.h  |  2 ++
>   kernel/locking/mutex.c | 83 +++++++++++++++++++++++++++++++++++++++++++-------
>   2 files changed, 74 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 2cb7531..5643a233 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -57,6 +57,8 @@ struct mutex {
>   #endif
>   #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>   	struct optimistic_spin_queue osq; /* Spinner MCS lock */
> +#elif defined(CONFIG_SMP)
> +	bool yield_to_waiter; /* Prevent starvation when spinning disabled */
>   #endif
>   #ifdef CONFIG_DEBUG_MUTEXES
>   	void			*magic;
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index a70b90d..faf31a0 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -55,6 +55,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
>   	mutex_clear_owner(lock);
>   #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>   	osq_lock_init(&lock->osq);
> +#elif defined(CONFIG_SMP)
> +	lock->yield_to_waiter = false;
>   #endif
>
>   	debug_mutex_init(lock, name, key);
> @@ -71,6 +73,9 @@ EXPORT_SYMBOL(__mutex_init);
>    */
>   __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
>
> +
> +static inline bool need_yield_to_waiter(struct mutex *lock);
> +
>   /**
>    * mutex_lock - acquire the mutex
>    * @lock: the mutex to be acquired
> @@ -99,7 +104,10 @@ void __sched mutex_lock(struct mutex *lock)
>   	 * The locking fastpath is the 1->0 transition from
>   	 * 'unlocked' into 'locked' state.
>   	 */
> -	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
> +	if (!need_yield_to_waiter(lock))
> +		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
> +	else
> +		__mutex_lock_slowpath(&lock->count);
>   	mutex_set_owner(lock);
>   }
>
> @@ -398,12 +406,51 @@ done:
>
>   	return false;
>   }
> +
> +static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
> +{
> +	return;
> +}
> +
> +static inline void clear_yield_to_waiter(struct mutex *lock)
> +{
> +	return;
> +}
> +
> +static inline bool need_yield_to_waiter(struct mutex *lock)
> +{
> +	return false;
> +}
> +
>   #else
>   static bool mutex_optimistic_spin(struct mutex *lock,
>   				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
>   {
>   	return false;
>   }
> +
> +#define MUTEX_WAKEUP_THRESHOLD 16
> +
> +static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
> +{
> +	*wakeups += 1;
> +
> +	if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
> +		return;
> +
> +	if (lock->yield_to_waiter != true)
> +		lock->yield_to_waiter = true;
> +}
> +
> +static inline void clear_yield_to_waiter(struct mutex *lock)
> +{
> +	lock->yield_to_waiter = false;
> +}
> +
> +static inline bool need_yield_to_waiter(struct mutex *lock)
> +{
> +	return lock->yield_to_waiter;
> +}
>   #endif
>
>   _

The *yield* helper functions should be in a separate conditional
compilation block, as the declaration of yield_to_waiter may not match
the helper functions with certain combinations of config variables.

Something like

#if !defined(CONFIG_MUTEX_SPIN_ON_OWNER) && defined(CONFIG_SMP)
...
#else
...
#endif
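
Fleshed out a bit (illustrative only; essentially the helpers from the
patch moved under that guard), it could look like:

#if !defined(CONFIG_MUTEX_SPIN_ON_OWNER) && defined(CONFIG_SMP)

#define MUTEX_WAKEUP_THRESHOLD	16

/* Count this waiter's wakeups; set the flag once it has waited too long. */
static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
{
	*wakeups += 1;

	if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
		return;

	if (!lock->yield_to_waiter)
		lock->yield_to_waiter = true;
}

static inline void clear_yield_to_waiter(struct mutex *lock)
{
	lock->yield_to_waiter = false;
}

static inline bool need_yield_to_waiter(struct mutex *lock)
{
	return lock->yield_to_waiter;
}

#else /* CONFIG_MUTEX_SPIN_ON_OWNER || !CONFIG_SMP */

/* No yield_to_waiter field in these configs; the helpers compile away. */
static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups) {}
static inline void clear_yield_to_waiter(struct mutex *lock) {}
static inline bool need_yield_to_waiter(struct mutex *lock) { return false; }

#endif

That way the stubs, rather than the real helpers, get built in the !SMP
case as well as when spinning is enabled.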

Cheers,
Longman

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-11 15:40 ` Waiman Long
@ 2016-08-16 19:44   ` Jason Low
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Low @ 2016-08-16 19:44 UTC (permalink / raw)
  To: Waiman Long
  Cc: jason.low2, Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney,
	jason.low2

On Thu, 2016-08-11 at 11:40 -0400, Waiman Long wrote:
> On 08/10/2016 02:44 PM, Jason Low wrote:
> > +static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
> > +{
> > +	return;
> > +}
> > +
> > +static inline void clear_yield_to_waiter(struct mutex *lock)
> > +{
> > +	return;
> > +}
> > +
> > +static inline bool need_yield_to_waiter(struct mutex *lock)
> > +{
> > +	return false;
> > +}
> > +
> >   #else
> >   static bool mutex_optimistic_spin(struct mutex *lock,
> >   				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
> >   {
> >   	return false;
> >   }
> > +
> > +#define MUTEX_WAKEUP_THRESHOLD 16
> > +
> > +static inline void do_yield_to_waiter(struct mutex *lock, int *wakeups)
> > +{
> > +	*wakeups += 1;
> > +
> > +	if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
> > +		return;
> > +
> > +	if (lock->yield_to_waiter != true)
> > +		lock->yield_to_waiter = true;
> > +}
> > +
> > +static inline void clear_yield_to_waiter(struct mutex *lock)
> > +{
> > +	lock->yield_to_waiter = false;
> > +}
> > +
> > +static inline bool need_yield_to_waiter(struct mutex *lock)
> > +{
> > +	return lock->yield_to_waiter;
> > +}
> >   #endif
> >
> >   _
> 
> The *yield* helper functions should be in a separate conditional 
> compilation block as the declaration of yield_to_waiter may not match 
> the helper functions with certain combination of config variables.
> 
> Something like
> 
> #if !defined(CONFIG_MUTEX_SPIN_ON_OWNER) && defined(CONFIG_SMP)
> ...
> #else
> ...
> #endif

Right, we will need to incorporate the CONFIG_SMP logic when defining
these functions here, otherwise they would be undefined in the !SMP
case. 

Thanks,
Jason

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 20:01 ` kbuild test robot
@ 2016-08-16 20:19   ` Jason Low
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Low @ 2016-08-16 20:19 UTC (permalink / raw)
  To: kbuild test robot
  Cc: jason.low2, kbuild-all, Peter Zijlstra, Ingo Molnar, imre.deak,
	linux-kernel, Waiman Long, Davidlohr Bueso, Tim Chen, terry.rudd,
	Paul E. McKenney, jason.low2

On Thu, 2016-08-11 at 04:01 +0800, kbuild test robot wrote:
> Hi Jason,
> 
> [auto build test ERROR on tip/locking/core]
> [also build test ERROR on v4.8-rc1 next-20160809]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
> 
> url:    https://github.com/0day-ci/linux/commits/Jason-Low/locking-mutex-Prevent-lock-starvation-when-spinning-is-enabled/20160811-034327
> config: x86_64-randconfig-x013-201632 (attached as .config)
> compiler: gcc-6 (Debian 6.1.1-9) 6.1.1 20160705
> reproduce:
>         # save the attached .config to linux build tree
>         make ARCH=x86_64 
> 
> All error/warnings (new ones prefixed by >>):
> 
>    kernel/locking/mutex.c: In function 'do_yield_to_waiter':
> >> kernel/locking/mutex.c:441:10: error: 'struct mutex' has no member named 'yield_to_waiter'
>      if (lock->yield_to_waiter != true)
>              ^~
>    kernel/locking/mutex.c:442:7: error: 'struct mutex' has no member named 'yield_to_waiter'
>       lock->yield_to_waiter = true;
>           ^~
>    kernel/locking/mutex.c: In function 'clear_yield_to_waiter':
>    kernel/locking/mutex.c:447:6: error: 'struct mutex' has no member named 'yield_to_waiter'
>      lock->yield_to_waiter = false;
>          ^~
>    kernel/locking/mutex.c: In function 'need_yield_to_waiter':
>    kernel/locking/mutex.c:452:13: error: 'struct mutex' has no member named 'yield_to_waiter'
>      return lock->yield_to_waiter;
>                 ^~

These compilation errors occur in the !SMP case where the
yield_to_waiter variable does not get defined in the mutex structure,
but the waiter yield functions still make use of the variable.

They should be addressed by also checking for #if defined(CONFIG_SMP)
when defining those functions as Waiman had suggested.

Thanks,
Jason

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
                   ` (4 preceding siblings ...)
  2016-08-11 15:40 ` Waiman Long
@ 2016-08-17  1:41 ` Wanpeng Li
  2016-08-17 18:30   ` Jason Low
  2016-08-18 14:14 ` Peter Zijlstra
  2016-08-18 14:27 ` Peter Zijlstra
  7 siblings, 1 reply; 16+ messages in thread
From: Wanpeng Li @ 2016-08-17  1:41 UTC (permalink / raw)
  To: Jason Low
  Cc: Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel, Jason Low,
	Waiman Long, Davidlohr Bueso, Tim Chen, terry.rudd,
	Paul E. McKenney

2016-08-11 2:44 GMT+08:00 Jason Low <jason.low2@hpe.com>:
> Imre reported an issue where threads are getting starved when trying
> to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
> sleeping on a mutex because other threads can continually steal the lock
> in the fastpath and/or through optimistic spinning.
>
> Waiman has developed patches that allow waiters to return to optimistic
> spinning, thus reducing the probability that starvation occurs. However,
> Imre still sees this starvation problem in the workloads when optimistic
> spinning is disabled.
>
> This patch adds an additional boolean to the mutex that gets used in
> the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
> whether or not other threads need to yield to a waiter and gets set
> when a waiter spends too much time waiting for the mutex. The threshold
> is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
> other threads must yield to the top waiter. The flag gets cleared
> immediately after the top waiter acquires the mutex.

There is a subtle difference between this patch and Waiman's. Waiman's
patch will boost any waiter-spinner which is woken up; however, this
patch will boost only the top waiter, and only once its wakeup count
reaches the threshold. We can't get any benefit if the resource holder
that the top waiter is waiting for still has not released the
resource.

Regards,
Wanpeng Li

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-17  1:41 ` Wanpeng Li
@ 2016-08-17 18:30   ` Jason Low
  2016-08-18  0:38     ` Wanpeng Li
  0 siblings, 1 reply; 16+ messages in thread
From: Jason Low @ 2016-08-17 18:30 UTC (permalink / raw)
  To: Wanpeng Li
  Cc: jason.low2, Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel,
	Waiman Long, Davidlohr Bueso, Tim Chen, terry.rudd,
	Paul E. McKenney, jason.low2

Hi Wanpeng,

On Wed, 2016-08-17 at 09:41 +0800, Wanpeng Li wrote:
> 2016-08-11 2:44 GMT+08:00 Jason Low <jason.low2@hpe.com>:
> > Imre reported an issue where threads are getting starved when trying
> > to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
> > sleeping on a mutex because other threads can continually steal the lock
> > in the fastpath and/or through optimistic spinning.
> >
> > Waiman has developed patches that allow waiters to return to optimistic
> > spinning, thus reducing the probability that starvation occurs. However,
> > Imre still sees this starvation problem in the workloads when optimistic
> > spinning is disabled.
> >
> > This patch adds an additional boolean to the mutex that gets used in
> > the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
> > whether or not other threads need to yield to a waiter and gets set
> > when a waiter spends too much time waiting for the mutex. The threshold
> > is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
> > other threads must yield to the top waiter. The flag gets cleared
> > immediately after the top waiter acquires the mutex.
> 
> There is a subtle difference between this patch and Waiman's. Waiman's
> patch will boost any waiter-spinner which is woken up, however, this
> patch will boost the top waiter once the number of any waiter-spinners
> woken up reaches the threshold.

Correct, since when spinning is disabled, we still want to generally
allow other threads to steal the lock even if there are waiters in order
to keep performance good, and only yield the lock when a waiter is
getting 'starved'.

> We can't get any benefit if the
> resource holder which top waiter is waiting for still not release the
> resource.

If the resource holder does not release the resource, that sounds like
an issue with the lock holder.

Unless you're referring to how this doesn't provide immediate benefit to
the top waiter, in which case, I think that is okay since the goal of
the patch is to prevent starvation. We tried disabling 'lock stealing'
anytime there are waiters and that proved to reduce performance by quite
a bit in some workloads.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-17 18:30   ` Jason Low
@ 2016-08-18  0:38     ` Wanpeng Li
  0 siblings, 0 replies; 16+ messages in thread
From: Wanpeng Li @ 2016-08-18  0:38 UTC (permalink / raw)
  To: Jason Low
  Cc: Peter Zijlstra, Ingo Molnar, imre.deak, linux-kernel,
	Waiman Long, Davidlohr Bueso, Tim Chen, terry.rudd,
	Paul E. McKenney, Jason Low

2016-08-18 2:30 GMT+08:00 Jason Low <jason.low2@hpe.com>:
> Hi Wanpeng,
>
> On Wed, 2016-08-17 at 09:41 +0800, Wanpeng Li wrote:
>> 2016-08-11 2:44 GMT+08:00 Jason Low <jason.low2@hpe.com>:
>> > Imre reported an issue where threads are getting starved when trying
>> > to acquire a mutex. Threads acquiring a mutex can get arbitrarily delayed
>> > sleeping on a mutex because other threads can continually steal the lock
>> > in the fastpath and/or through optimistic spinning.
>> >
>> > Waiman has developed patches that allow waiters to return to optimistic
>> > spinning, thus reducing the probability that starvation occurs. However,
>> > Imre still sees this starvation problem in the workloads when optimistic
>> > spinning is disabled.
>> >
>> > This patch adds an additional boolean to the mutex that gets used in
>> > the CONFIG_SMP && !CONFIG_MUTEX_SPIN_ON_OWNER cases. The flag signifies
>> > whether or not other threads need to yield to a waiter and gets set
>> > when a waiter spends too much time waiting for the mutex. The threshold
>> > is currently set to 16 wakeups, and once the wakeup threshold is exceeded,
>> > other threads must yield to the top waiter. The flag gets cleared
>> > immediately after the top waiter acquires the mutex.
>>
>> There is a subtle difference between this patch and Waiman's. Waiman's
>> patch will boost any waiter-spinner which is woken up, however, this
>> patch will boost the top waiter once the number of any waiter-spinners
>> woken up reaches the threshold.
>
> Correct, since when spinning is disabled, we still want to generally
> allow other threads to steal the lock even if there are waiters in order
> to keep performance good, and only yield the lock when a waiter is
> getting 'starved'.
>
>> We can't get any benefit if the
>> resource holder which top waiter is waiting for still not release the
>> resource.
>
> If the resource holder does not release the resource, that sounds like
> an issue with the lock holder.
>
> Unless you're referring to how this doesn't provide immediate benefit to
> the top waiter,

Yes.

> in which case, I think that is okay since the goal of
> the patch is to prevent starvation. We tried disabling 'lock stealing'
> anytime there are waiters and that proved to reduce performance by quite
> a bit in some workloads.

Thanks for the clarification. :)

Regards,
Wanpeng Li

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
                   ` (5 preceding siblings ...)
  2016-08-17  1:41 ` Wanpeng Li
@ 2016-08-18 14:14 ` Peter Zijlstra
  2016-08-18 15:22   ` Waiman Long
  2016-08-18 14:27 ` Peter Zijlstra
  7 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-08-18 14:14 UTC (permalink / raw)
  To: Jason Low
  Cc: Ingo Molnar, imre.deak, linux-kernel, Jason Low, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Wed, Aug 10, 2016 at 11:44:08AM -0700, Jason Low wrote:
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 2cb7531..5643a233 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -57,6 +57,8 @@ struct mutex {
>  #endif
>  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>  	struct optimistic_spin_queue osq; /* Spinner MCS lock */
> +#elif defined(CONFIG_SMP)
> +	bool yield_to_waiter; /* Prevent starvation when spinning disabled */
>  #endif
>  #ifdef CONFIG_DEBUG_MUTEXES
>  	void			*magic;

Isn't this also possible on !SMP && PREEMPT ?

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
                   ` (6 preceding siblings ...)
  2016-08-18 14:14 ` Peter Zijlstra
@ 2016-08-18 14:27 ` Peter Zijlstra
  2016-08-18 15:18   ` Peter Zijlstra
  7 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-08-18 14:27 UTC (permalink / raw)
  To: Jason Low
  Cc: Ingo Molnar, imre.deak, linux-kernel, Jason Low, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Wed, Aug 10, 2016 at 11:44:08AM -0700, Jason Low wrote:
> @@ -556,8 +604,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
>  		 * other waiters. We only attempt the xchg if the count is
>  		 * non-negative in order to avoid unnecessary xchg operations:
>  		 */
> -		if (atomic_read(&lock->count) >= 0 &&
> +		if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
> +		    atomic_read(&lock->count) >= 0 &&
>  		    (atomic_xchg_acquire(&lock->count, -1) == 1))
> +			if (wakeups > 1)
> +				clear_yield_to_waiter(lock);
> +
>  			break;
>  
>  		/*

There's some { } gone missing there...

Also, I think I'll change it to avoid that extra wakeups > 1 condition..
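
For reference, with the braces restored (and still keeping that extra
condition, so this is only a sketch) the hunk would read:

		if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
		    atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg_acquire(&lock->count, -1) == 1)) {
			/* Top waiter got the lock; let others steal again. */
			if (wakeups > 1)
				clear_yield_to_waiter(lock);
			break;
		}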

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-18 14:27 ` Peter Zijlstra
@ 2016-08-18 15:18   ` Peter Zijlstra
  2016-08-18 15:30     ` Peter Zijlstra
  0 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-08-18 15:18 UTC (permalink / raw)
  To: Jason Low
  Cc: Ingo Molnar, imre.deak, linux-kernel, Jason Low, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Thu, Aug 18, 2016 at 04:27:35PM +0200, Peter Zijlstra wrote:
> On Wed, Aug 10, 2016 at 11:44:08AM -0700, Jason Low wrote:
> > @@ -556,8 +604,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> >  		 * other waiters. We only attempt the xchg if the count is
> >  		 * non-negative in order to avoid unnecessary xchg operations:
> >  		 */
> > -		if (atomic_read(&lock->count) >= 0 &&
> > +		if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
> > +		    atomic_read(&lock->count) >= 0 &&
> >  		    (atomic_xchg_acquire(&lock->count, -1) == 1))
> > +			if (wakeups > 1)
> > +				clear_yield_to_waiter(lock);
> > +
> >  			break;
> >  
> >  		/*
> 
> There's some { } gone missing there...
> 
> Also, I think I'll change it to avoid that extra wakeups > 1 condition..

Also, it's broken: even if we should not trylock, we should still very
much xchg(-1) to mark the lock as having waiters.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-18 14:14 ` Peter Zijlstra
@ 2016-08-18 15:22   ` Waiman Long
  0 siblings, 0 replies; 16+ messages in thread
From: Waiman Long @ 2016-08-18 15:22 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Jason Low, Ingo Molnar, imre.deak, linux-kernel, Jason Low,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On 08/18/2016 10:14 AM, Peter Zijlstra wrote:
> On Wed, Aug 10, 2016 at 11:44:08AM -0700, Jason Low wrote:
>> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
>> index 2cb7531..5643a233 100644
>> --- a/include/linux/mutex.h
>> +++ b/include/linux/mutex.h
>> @@ -57,6 +57,8 @@ struct mutex {
>>   #endif
>>   #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>>   	struct optimistic_spin_queue osq; /* Spinner MCS lock */
>> +#elif defined(CONFIG_SMP)
>> +	bool yield_to_waiter; /* Prevent starvation when spinning disabled */
>>   #endif
>>   #ifdef CONFIG_DEBUG_MUTEXES
>>   	void			*magic;
> Isn't this also possible on !SMP && PREEMPT ?

I don't think there is any realistic chance that starvation will happen
on a uniprocessor system with a preemptible kernel, so I don't think
that is necessary.

Cheers,
Longman

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled
  2016-08-18 15:18   ` Peter Zijlstra
@ 2016-08-18 15:30     ` Peter Zijlstra
  0 siblings, 0 replies; 16+ messages in thread
From: Peter Zijlstra @ 2016-08-18 15:30 UTC (permalink / raw)
  To: Jason Low
  Cc: Ingo Molnar, imre.deak, linux-kernel, Jason Low, Waiman Long,
	Davidlohr Bueso, Tim Chen, terry.rudd, Paul E. McKenney

On Thu, Aug 18, 2016 at 05:18:43PM +0200, Peter Zijlstra wrote:
> On Thu, Aug 18, 2016 at 04:27:35PM +0200, Peter Zijlstra wrote:
> > On Wed, Aug 10, 2016 at 11:44:08AM -0700, Jason Low wrote:
> > > @@ -556,8 +604,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> > >  		 * other waiters. We only attempt the xchg if the count is
> > >  		 * non-negative in order to avoid unnecessary xchg operations:
> > >  		 */
> > > -		if (atomic_read(&lock->count) >= 0 &&
> > > +		if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
> > > +		    atomic_read(&lock->count) >= 0 &&
> > >  		    (atomic_xchg_acquire(&lock->count, -1) == 1))
> > > +			if (wakeups > 1)
> > > +				clear_yield_to_waiter(lock);
> > > +
> > >  			break;
> > >  
> > >  		/*
> > 
> > There's some { } gone missing there...
> > 
> > Also, I think I'll change it to avoid that extra wakeups > 1 condition..
> 
> Also, its broken, even if we should not trylock, we should still very
> much xchg(-1) to mark the lock as having waiters.

Ah, no. Since need_yield_to_waiter() can only be true if there is an
actual waiter, at which point count must already be -1. /me adds a
comment.
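
Something along these lines above the trylock, say (wording is only a
guess):

		/*
		 * need_yield_to_waiter() implies there is a queued waiter,
		 * so lock->count is already -1 and skipping the trylock
		 * here cannot lose the 'has waiters' marking.
		 */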

^ permalink raw reply	[flat|nested] 16+ messages in thread

Thread overview: 16+ messages
2016-08-10 18:44 [PATCH v2] locking/mutex: Prevent lock starvation when spinning is enabled Jason Low
2016-08-10 20:00 ` kbuild test robot
2016-08-10 20:01 ` kbuild test robot
2016-08-16 20:19   ` Jason Low
2016-08-10 22:07 ` Jason Low
2016-08-11  2:30 ` Jason Low
2016-08-11 15:40 ` Waiman Long
2016-08-16 19:44   ` Jason Low
2016-08-17  1:41 ` Wanpeng Li
2016-08-17 18:30   ` Jason Low
2016-08-18  0:38     ` Wanpeng Li
2016-08-18 14:14 ` Peter Zijlstra
2016-08-18 15:22   ` Waiman Long
2016-08-18 14:27 ` Peter Zijlstra
2016-08-18 15:18   ` Peter Zijlstra
2016-08-18 15:30     ` Peter Zijlstra
