From: Waiman Long

This patch removes some of the redundant ww_mutex code in
__mutex_lock_common(). The struct ww_mutex pointer is now declared once at
the top of the function and computed a single time in the initial
use_ww_ctx check, so the fast-path and slow-path lock-acquired branches no
longer need their own local container_of() calls.

Cc: Ingo Molnar
Cc: Linus Torvalds
Cc: Tim Chen
Cc: Thomas Gleixner
Cc: Imre Deak
Cc: Jason Low
Cc: "Paul E. McKenney"
Cc: Ding Tianhong
Cc: Davidlohr Bueso
Cc: Will Deacon
Signed-off-by: Waiman Long
Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/locking/mutex.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -580,10 +580,11 @@ __mutex_lock_common(struct mutex *lock,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	bool first = false;
+	struct ww_mutex *ww;
 	int ret;
 
 	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		ww = container_of(lock, struct ww_mutex, base);
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -595,12 +596,8 @@ __mutex_lock_common(struct mutex *lock,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx) {
-			struct ww_mutex *ww;
-			ww = container_of(lock, struct ww_mutex, base);
-
+		if (use_ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
-		}
 		preempt_enable();
 		return 0;
 	}
@@ -680,10 +677,8 @@ __mutex_lock_common(struct mutex *lock,
 
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	if (use_ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
-	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	preempt_enable();
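
For reference, a minimal user-space sketch of the pattern the diff applies:
derive the enclosing ww-style structure pointer with container_of() once, up
front, and reuse it on both the fast and the slow path instead of recomputing
it in each branch. The identifiers below (ww_style_lock, lock_common,
set_context) are illustrative stand-ins, not kernel code; in the kernel,
use_ww_ctx is effectively a compile-time constant for each inlined caller,
whereas this standalone example initializes the pointer to NULL to stay
self-contained.

/*
 * Minimal, self-contained sketch (plain user-space C, not kernel code) of
 * the refactoring above: the wrapper-structure pointer is derived from the
 * embedded lock with container_of() exactly once and then reused, instead
 * of being recomputed in every branch that needs it.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_lock {			/* stand-in for struct mutex */
	int locked;
};

struct ww_style_lock {			/* stand-in for struct ww_mutex */
	struct base_lock base;
	int ctx;
};

static void set_context(struct ww_style_lock *ww, int ctx)
{
	ww->ctx = ctx;
}

static int lock_common(struct base_lock *lock, int use_ctx, int ctx,
		       int fastpath)
{
	struct ww_style_lock *ww = NULL;

	if (use_ctx)			/* computed once, up front */
		ww = container_of(lock, struct ww_style_lock, base);

	if (fastpath) {
		lock->locked = 1;
		if (use_ctx)		/* no second container_of() here */
			set_context(ww, ctx);
		return 0;
	}

	/* ... a real slow path would block/wait here ... */
	lock->locked = 1;
	if (use_ctx)			/* nor here */
		set_context(ww, ctx);
	return 0;
}

int main(void)
{
	struct ww_style_lock l = { { 0 }, 0 };

	lock_common(&l.base, 1, 42, 1);
	printf("ctx = %d\n", l.ctx);	/* prints "ctx = 42" */
	return 0;
}

Hoisting the container_of() into one place removes two duplicate local
declarations and lets both lock-acquired branches collapse into
single-statement if bodies, which matches the 4-insertion/9-deletion shape of
the diffstat above.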