From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1751753AbcGTEmy (ORCPT );
	Wed, 20 Jul 2016 00:42:54 -0400
Received: from g2t4618.austin.hp.com ([15.73.212.83]:58088 "EHLO
	g2t4618.austin.hp.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1751282AbcGTEmx (ORCPT );
	Wed, 20 Jul 2016 00:42:53 -0400
Message-ID: <1468989556.10247.22.camel@j-VirtualBox>
Subject: Re: [RFC] Avoid mutex starvation when optimistic spinning is disabled
From: Jason Low 
To: imre.deak@intel.com
Cc: jason.low2@hpe.com, Peter Zijlstra , linux-kernel@vger.kernel.org,
	Ingo Molnar , Chris Wilson , Daniel Vetter , Waiman Long ,
	Davidlohr Bueso , jason.low2@hp.com
Date: Tue, 19 Jul 2016 21:39:16 -0700
In-Reply-To: <1468969470.10247.15.camel@j-VirtualBox>
References: <1468858607-20481-1-git-send-email-imre.deak@intel.com>
	 <20160718171537.GC6862@twins.programming.kicks-ass.net>
	 <1468864069.2367.21.camel@j-VirtualBox>
	 <1468947205.31332.40.camel@intel.com>
	 <1468969470.10247.15.camel@j-VirtualBox>
Content-Type: text/plain; charset="UTF-8"
X-Mailer: Evolution 3.10.4-0ubuntu2
Mime-Version: 1.0
Content-Transfer-Encoding: 7bit
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

On Tue, 2016-07-19 at 16:04 -0700, Jason Low wrote:
> Hi Imre,
> 
> Here is a patch which prevents a thread from spending too much "time"
> waiting for a mutex in the !CONFIG_MUTEX_SPIN_ON_OWNER case.
> 
> Would you like to try this out and see if this addresses the mutex
> starvation issue you are seeing in your workload when optimistic
> spinning is disabled?

It looks like that patch didn't take care of the 'lock stealing' case
in the slowpath, though. Here is the updated version with that fixed:

---
Signed-off-by: Jason Low 
---
 include/linux/mutex.h  |  2 ++
 kernel/locking/mutex.c | 65 ++++++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 60 insertions(+), 7 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531..c1ca68d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -57,6 +57,8 @@ struct mutex {
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#else
+	bool yield_to_waiter; /* Prevent starvation when spinning disabled */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	void		*magic;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..6c915ca 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -55,6 +55,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	osq_lock_init(&lock->osq);
+#else
+	lock->yield_to_waiter = false;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -71,6 +73,9 @@ EXPORT_SYMBOL(__mutex_init);
  */
 __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
+
+static inline bool need_yield_to_waiter(struct mutex *lock);
+
 /**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
@@ -95,11 +100,15 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 void __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
+
 	/*
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
 	 */
-	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	if (!need_yield_to_waiter(lock))
+		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	else
+		__mutex_lock_slowpath(&lock->count);
 	mutex_set_owner(lock);
 }
 
@@ -398,12 +407,39 @@ done:
 
 	return false;
 }
+
+static inline void do_yield_to_waiter(struct mutex *lock, int loops)
+{
+	return;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+	return false;
+}
+
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
 				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	return false;
 }
+
+#define MUTEX_MAX_WAIT 32
+
+static inline void do_yield_to_waiter(struct mutex *lock, int loops)
+{
+	if (loops < MUTEX_MAX_WAIT)
+		return;
+
+	if (lock->yield_to_waiter != true)
+		lock->yield_to_waiter = true;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+	return lock->yield_to_waiter;
+}
 #endif
 
 __visible __used noinline
@@ -510,6 +546,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	int ret;
+	int loop = 0;
 
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
@@ -532,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * Once more, try to acquire the lock. Only try-lock the mutex if
 	 * it is unlocked to reduce unnecessary xchg() operations.
 	 */
-	if (!mutex_is_locked(lock) &&
+	if (!need_yield_to_waiter(lock) && !mutex_is_locked(lock) &&
 	    (atomic_xchg_acquire(&lock->count, 0) == 1))
 		goto skip_wait;
 
@@ -546,6 +583,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
+		loop++;
+
 		/*
 		 * Lets try to take the lock again - this is needed even if
 		 * we get here for the first time (shortly after failing to
@@ -556,7 +595,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * other waiters. We only attempt the xchg if the count is
 		 * non-negative in order to avoid unnecessary xchg operations:
 		 */
-		if (atomic_read(&lock->count) >= 0 &&
+		if ((!need_yield_to_waiter(lock) || loop > 1) &&
+		    atomic_read(&lock->count) >= 0 &&
 		    (atomic_xchg_acquire(&lock->count, -1) == 1))
 			break;
 
@@ -581,6 +621,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
+		do_yield_to_waiter(lock, loop);
 	}
 	__set_task_state(task, TASK_RUNNING);
 
@@ -590,6 +631,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		atomic_set(&lock->count, 0);
 
 	debug_mutex_free_waiter(&waiter);
+#ifndef CONFIG_MUTEX_SPIN_ON_OWNER
+	lock->yield_to_waiter = false;
+#endif
+
 skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
@@ -789,10 +834,13 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval(&lock->count);
+
+	if (!need_yield_to_waiter(lock))
+		ret = __mutex_fastpath_lock_retval(&lock->count);
+
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
 		return 0;
@@ -804,10 +852,13 @@ EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
-	int ret;
+	int ret = 1;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval(&lock->count);
+
+	if (!need_yield_to_waiter(lock))
+		ret = __mutex_fastpath_lock_retval(&lock->count);
+
 	if (likely(!ret)) {
 		mutex_set_owner(lock);
 		return 0;
-- 
2.1.4
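
For illustration, below is a rough userspace model of the yield_to_waiter
mechanism in the patch above. It is only a sketch under assumptions: the
toy_* names are invented for this example, C11 atomics replace the kernel's
atomic_t, the fastpath is just a compare-and-swap, and sched_yield() stands
in for the real sleep on the mutex wait queue.

/*
 * Toy userspace model of the yield_to_waiter idea -- NOT the kernel code.
 * count: 1 = unlocked, 0 = locked.  A waiter that has looped too many
 * times sets yield_to_waiter, which makes new lockers skip the fastpath
 * (and the first-iteration steal) until the flag is cleared.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MUTEX_MAX_WAIT 32

struct toy_mutex {
	atomic_int count;		/* 1 = unlocked, 0 = locked */
	atomic_bool yield_to_waiter;	/* set by a starving waiter */
};

static bool toy_trylock(struct toy_mutex *lock)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&lock->count, &expected, 0);
}

/* Slowpath: keep retrying; after too many rounds, demand priority. */
static void toy_lock_slowpath(struct toy_mutex *lock)
{
	int loop = 0;

	for (;;) {
		loop++;
		/* Old waiters (loop > 1) may still steal while the flag is set. */
		if ((!atomic_load(&lock->yield_to_waiter) || loop > 1) &&
		    toy_trylock(lock))
			break;

		if (loop >= MUTEX_MAX_WAIT)
			atomic_store(&lock->yield_to_waiter, true);

		sched_yield();	/* stand-in for schedule_preempt_disabled() */
	}

	/* Got the lock: let fastpath lockers race normally again. */
	atomic_store(&lock->yield_to_waiter, false);
}

static void toy_lock(struct toy_mutex *lock)
{
	/* Skip the fastpath while some waiter is being starved. */
	if (atomic_load(&lock->yield_to_waiter) || !toy_trylock(lock))
		toy_lock_slowpath(lock);
}

static void toy_unlock(struct toy_mutex *lock)
{
	atomic_store(&lock->count, 1);
}

static struct toy_mutex m = { 1, false };

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		toy_lock(&m);
		toy_unlock(&m);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("done\n");
	return 0;
}

The behaviour mirrors the intent of the patch: once a waiter has gone around
the wait loop MUTEX_MAX_WAIT times it sets the flag, new lockers are diverted
away from the fastpath and the lock-steal path, and the flag is cleared when a
slowpath waiter finally takes the lock.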