There is but a single user of rt_mutex_fastunlock() and the previous patch mandates that slowfn must be rt_mutex_slowunlock(), so fold the lot together and save a few lines.

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/locking/rtmutex.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1377,24 +1377,6 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
 	return slowfn(lock);
 }
 
-static inline void
-rt_mutex_fastunlock(struct rt_mutex *lock,
-		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh))
-{
-	WAKE_Q(wake_q);
-
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
-		rt_mutex_deadlock_account_unlock(current);
-
-	} else {
-		bool deboost = slowfn(lock, &wake_q);
-
-		rt_mutex_postunlock(&wake_q, deboost);
-	}
-}
-
-
 /*
  * Undo pi boosting (if necessary) and wake top waiter.
  */
@@ -1501,7 +1483,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	WAKE_Q(wake_q);
+
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+		rt_mutex_deadlock_account_unlock(current);
+
+	} else {
+		bool deboost = rt_mutex_slowunlock(lock, &wake_q);
+
+		rt_mutex_postunlock(&wake_q, deboost);
+	}
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
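
For reference, here is a minimal userspace sketch of the fast-path/slow-path shape that the folded rt_mutex_unlock() above follows: a release-ordered cmpxchg of the owner word from the current task to NULL, with a fallback to the slow path that queues the top waiter and performs the wakeup (and deboost) afterwards. This is not kernel code; all names (mock_rt_mutex, fast_unlock, mock_slowunlock, mock_postunlock, HAS_WAITERS) are made up for illustration, and C11 atomics stand in for rt_mutex_cmpxchg_release().

/*
 * Userspace model of the unlock pattern open-coded in rt_mutex_unlock():
 * fast path = release cmpxchg owner -> NULL, slow path = queue top waiter
 * and wake/deboost afterwards. Names and types are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Low bit of the owner word marks "has waiters" (the kernel uses the same trick). */
#define HAS_WAITERS 1UL

struct mock_task { const char *name; };

struct mock_rt_mutex {
	/* Owner task pointer, with the low bit abused as the waiters flag. */
	_Atomic uintptr_t owner;
};

static struct mock_task me = { "me" };
static struct mock_task waiter = { "waiter" };

/* Fast path: release-ordered cmpxchg owner -> NULL; fails if the waiters bit is set. */
static bool fast_unlock(struct mock_rt_mutex *lock, struct mock_task *cur)
{
	uintptr_t expected = (uintptr_t)cur;

	return atomic_compare_exchange_strong_explicit(&lock->owner, &expected,
						       (uintptr_t)0,
						       memory_order_release,
						       memory_order_relaxed);
}

/* Slow-path stand-in: "queue" the top waiter, clear the owner, ask for deboost. */
static bool mock_slowunlock(struct mock_rt_mutex *lock, struct mock_task **wake_q)
{
	*wake_q = &waiter;
	atomic_store_explicit(&lock->owner, 0, memory_order_release);
	return true;
}

/* Post-unlock stand-in: wake the queued waiter and undo boosting if requested. */
static void mock_postunlock(struct mock_task *wake_q, bool deboost)
{
	if (wake_q)
		printf("waking %s%s\n", wake_q->name, deboost ? " (deboosting)" : "");
}

/* Same shape as the folded rt_mutex_unlock(): fast path first, else slow + post. */
static void mock_unlock(struct mock_rt_mutex *lock, struct mock_task *cur)
{
	struct mock_task *wake_q = NULL;

	if (fast_unlock(lock, cur)) {
		printf("fast path: released uncontended lock\n");
	} else {
		bool deboost = mock_slowunlock(lock, &wake_q);

		mock_postunlock(wake_q, deboost);
	}
}

int main(void)
{
	struct mock_rt_mutex lock;

	/* Uncontended: owner word is just the task pointer, fast path succeeds. */
	atomic_store(&lock.owner, (uintptr_t)&me);
	mock_unlock(&lock, &me);

	/* Contended: waiters bit set, the cmpxchg fails, slow path runs. */
	atomic_store(&lock.owner, (uintptr_t)&me | HAS_WAITERS);
	mock_unlock(&lock, &me);

	return 0;
}

The split mirrored here is the point of the diff: the slow path only collects the top waiter, and rt_mutex_postunlock() does the actual wakeup and, if necessary, the PI deboost afterwards, exactly as in the new rt_mutex_unlock() body above.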