--- linux-2.6.15-rt17/kernel/rt.c.orig	2006-02-22 16:53:44.000000000 +0100
+++ linux-2.6.15-rt17/kernel/rt.c	2006-02-25 23:20:34.000000000 +0100
@@ -873,10 +873,19 @@ pid_t get_blocked_on(task_t *task)
 	}
 	lock = task->blocked_on->lock;
+
+	/*
+	 * Now we have to take lock->wait_lock _before_ releasing
+	 * task->pi_lock. Otherwise lock can be deallocated while we are
+	 * referring to it as the subsystem has no way of knowing about us
+	 * hanging around in here.
+	 */
+	if (!_raw_spin_trylock(&lock->wait_lock)) {
+		_raw_spin_unlock(&task->pi_lock);
+		goto try_again;
+	}
 	_raw_spin_unlock(&task->pi_lock);
 
-	if (!_raw_spin_trylock(&lock->wait_lock))
-		goto try_again;
 
 	owner = lock_owner(lock);
 	if (owner)
 
@@ -964,14 +973,52 @@ static inline int calc_pi_prio(task_t *t
 }
 
 /*
- * Adjust priority of a task
+ * Adjust priority of a task.
  */
-static void adjust_prio(task_t *task)
+static void adjust_prio_no_wakeup(task_t *task)
 {
 	int prio = calc_pi_prio(task);
 
-	if (task->prio != prio)
+	if (task->prio != prio) {
+		mutex_setprio(task, prio);
+	}
+}
+
+/*
+ * Adjust priority of a task and wake it up if the prio is changed
+ * and it is blocked on a mutex
+ */
+static void adjust_prio_wakeup(task_t *task)
+{
+	int prio = calc_pi_prio(task);
+
+	if (task->prio < prio) {
+		if (task->blocked_on) {
+			/*
+			 * The owner will have the blocked field set if it is
+			 * blocked on a lock. So in this case we want to wake
+			 * the owner up so it can boost who it is blocked on.
+			 *
+			 * We have to wait with lowering its priority until this is done
+			 * or we risk letting other high priority task hang around.
+			 */
+			wake_up_process_mutex(task);
+		}
+		else {
+			mutex_setprio(task, prio);
+		}
+	}
+	else if (task->prio > prio) {
 		mutex_setprio(task, prio);
+		if (task->blocked_on) {
+			/*
+			 * The owner will have the blocked field set if it is
+			 * blocked on a lock. So in this case we want to wake
+			 * the owner up so it can boost who it is blocked on.
+			 */
+			wake_up_process_mutex(task);
+		}
+	}
 }
 
 /*
@@ -1001,6 +1048,7 @@ static long task_blocks_on_lock(struct r
 
 	/* Enqueue the task into the lock waiter list */
 	_raw_spin_lock(&current->pi_lock);
+	adjust_prio_no_wakeup(current);
 	current->blocked_on = waiter;
 	waiter->lock = lock;
 	waiter->task = current;
@@ -1034,16 +1082,7 @@ static long task_blocks_on_lock(struct r
 
 	/* Add the new top priority waiter to the owners waiter list */
 	plist_add(&waiter->pi_list, &owner->pi_waiters);
-	adjust_prio(owner);
-
-	/*
-	 * The owner will have the blocked field set if it is
-	 * blocked on a lock. So in this case we want to wake
-	 * the owner up so it can boost who it is blocked on.
-	 */
-	if (owner->blocked_on)
-		wake_up_process_mutex(owner);
-
+	adjust_prio_wakeup(owner);
 	_raw_spin_unlock(&owner->pi_lock);
 	return ret;
 }
@@ -1139,7 +1178,7 @@ static void remove_waiter(struct rt_mute
 			next = lock_first_waiter(lock);
 			plist_add(&next->pi_list, &owner->pi_waiters);
 		}
-		adjust_prio(owner);
+		adjust_prio_wakeup(owner);
 		_raw_spin_unlock(&owner->pi_lock);
 	}
 }
@@ -1201,7 +1240,7 @@ static void release_lock(struct rt_mutex
 
 	/* Readjust priority, when necessary. */
 	_raw_spin_lock(&current->pi_lock);
-	adjust_prio(current);
+	adjust_prio_no_wakeup(current);
 	_raw_spin_unlock(&current->pi_lock);
 }
 
@@ -1453,7 +1492,7 @@ static int __sched down_rtsem(struct rt_
 		 * PI boost has to go
 		 */
 		_raw_spin_lock(&current->pi_lock);
-		adjust_prio(current);
+		adjust_prio_no_wakeup(current);
 		_raw_spin_unlock(&current->pi_lock);
 	}
 	trace_unlock_irqrestore(&tracelock, flags);