Now that we guarantee hb queue and rt_mutex waiter state match up, we
no longer need to deal with the fallout of when they don't.

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/futex.c | 38 +-------------------------------------
 1 file changed, 1 insertion(+), 37 deletions(-)

--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1396,13 +1396,7 @@ static int wake_futex_pi(u32 __user *uad

 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-	/*
-	 * It is possible that the next waiter (the one that brought
-	 * top_waiter owner to the kernel) timed out and is no longer
-	 * waiting on the lock.
-	 */
-	if (!new_owner)
-		new_owner = top_waiter->task;
+	BUG_ON(!new_owner);

 	/*
 	 * We pass it to the next owner. The WAITERS bit is always
@@ -2324,7 +2318,6 @@ static long futex_wait_restart(struct re
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
-	struct task_struct *owner;
 	int ret = 0;

 	if (locked) {
@@ -2337,35 +2330,6 @@ static int fixup_owner(u32 __user *uaddr
 		goto out;
 	}

-	/*
-	 * Catch the rare case, where the lock was released when we were on the
-	 * way back before we locked the hash bucket.
-	 */
-	if (q->pi_state->owner == current) {
-		/*
-		 * Try to get the rt_mutex now. This might fail as some other
-		 * task acquired the rt_mutex after we removed ourself from the
-		 * rt_mutex waiters list.
-		 */
-		if (rt_mutex_futex_trylock(&q->pi_state->pi_mutex)) {
-			locked = 1;
-			goto out;
-		}
-
-		/*
-		 * pi_state is incorrect, some other task did a lock steal and
-		 * we returned due to timeout or signal without taking the
-		 * rt_mutex. Too late.
-		 */
-		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
-		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
-		if (!owner)
-			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
-		ret = fixup_pi_state_owner(uaddr, q, owner);
-		goto out;
-	}
-
 	/*
 	 * Paranoia check. If we did not take the lock, then we should not be
 	 * the owner of the rt_mutex.
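
For review convenience, a condensed sketch of the resulting control flow
after this patch. This is not a verbatim copy of post-patch kernel/futex.c:
the locked branch of fixup_owner() and its return statement are reproduced
from the surrounding kernel source rather than from the hunks above, and the
paranoia-check body is elided.

	/*
	 * wake_futex_pi(): since the hb queue and the rt_mutex waiter list
	 * now always match up, the top waiter must exist here.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	BUG_ON(!new_owner);	/* a vanished waiter would be a state mismatch */

	/* fixup_owner(): the lock-steal recovery path is gone entirely. */
	static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
	{
		int ret = 0;

		if (locked) {
			/*
			 * Got the lock; fix up pi_state if a lock steal made
			 * someone other than us the recorded owner.
			 */
			if (q->pi_state->owner != current)
				ret = fixup_pi_state_owner(uaddr, q, current);
			goto out;
		}

		/*
		 * Paranoia check: we did not take the lock, so we must not
		 * be the rt_mutex owner (elided).
		 */
		...
	out:
		return ret ? ret : locked;
	}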