From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Chris Mason <chris.mason@oracle.com>,
Frank Rowand <frank.rowand@am.sony.com>,
Ingo Molnar <mingo@elte.hu>, Thomas Gleixner <tglx@linutronix.de>,
Mike Galbraith <efault@gmx.de>, Oleg Nesterov <oleg@redhat.com>,
Paul Turner <pjt@google.com>, Jens Axboe <axboe@kernel.dk>,
Yong Zhang <yong.zhang0@gmail.com>
Cc: linux-kernel@vger.kernel.org, Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 13/21] sched: Add p->pi_lock to task_rq_lock()
Date: Tue, 05 Apr 2011 17:23:51 +0200
Message-ID: <20110405152729.232781355@chello.nl>
In-Reply-To: <20110405152338.692966333@chello.nl>
In order to be able to call set_task_cpu() while holding either
p->pi_lock or task_rq(p)->lock, we need to hold both locks to
stabilize task_rq().

This makes task_rq_lock() acquire both locks, and has
__task_rq_lock() validate that p->pi_lock is already held. This
increases the locking overhead of most scheduler syscalls, but
allows rq->lock contention to be reduced on some scheduler hot
paths (ttwu).
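For illustration, the locking pattern reduces to the following
minimal userspace analogue (a sketch only: pthread mutexes stand in
for the raw spinlocks, IRQ disabling is omitted, and struct task /
struct rq here are local stand-ins, not the kernel types). The key
point is the revalidation: p->rq may be changed by a migrator that
holds only the old rq's lock, so even with pi_lock held we must
re-check task_rq(p) after taking rq->lock; once both locks are held,
set_task_cpu() is excluded and task_rq() is stable.

#include <pthread.h>
#include <stdatomic.h>

struct rq {
	pthread_mutex_t lock;
};

struct task {
	pthread_mutex_t pi_lock;
	/* invariant: changed only while holding pi_lock or rq->lock */
	struct rq *_Atomic rq;
};

static struct rq *task_rq(struct task *p)
{
	return atomic_load(&p->rq);
}

/* Acquire p->pi_lock plus the lock of the rq @p is queued on. */
static struct rq *task_rq_lock_analogue(struct task *p)
{
	struct rq *rq;

	for (;;) {
		pthread_mutex_lock(&p->pi_lock);
		rq = task_rq(p);
		pthread_mutex_lock(&rq->lock);
		if (rq == task_rq(p))
			return rq;	/* both held; task_rq() pinned */
		/* @p migrated between the two loads; drop both, retry */
		pthread_mutex_unlock(&rq->lock);
		pthread_mutex_unlock(&p->pi_lock);
	}
}

static void task_rq_unlock_analogue(struct rq *rq, struct task *p)
{
	pthread_mutex_unlock(&rq->lock);
	pthread_mutex_unlock(&p->pi_lock);
}
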
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
---
kernel/sched.c | 103 ++++++++++++++++++++++++++-------------------------------
1 file changed, 47 insertions(+), 56 deletions(-)
Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -600,7 +600,7 @@ static inline int cpu_of(struct rq *rq)
* Return the group to which this tasks belongs.
*
* We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
* holds that lock for each task it moves into the cgroup. Therefore
* by holding that lock, we pin the task to the current cgroup.
*/
@@ -610,7 +610,7 @@ static inline struct task_group *task_gr
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
- lockdep_is_held(&task_rq(p)->lock));
+ lockdep_is_held(&p->pi_lock));
tg = container_of(css, struct task_group, css);
return autogroup_task_group(p, tg);
@@ -926,23 +926,15 @@ static inline void finish_lock_switch(st
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
- * Check whether the task is waking, we use this to synchronize ->cpus_allowed
- * against ttwu().
- */
-static inline int task_is_waking(struct task_struct *p)
-{
- return unlikely(p->state == TASK_WAKING);
-}
-
-/*
- * __task_rq_lock - lock the runqueue a given task resides on.
- * Must be called interrupts disabled.
+ * __task_rq_lock - lock the rq @p resides on.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct rq *rq;
+ lockdep_assert_held(&p->pi_lock);
+
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
@@ -953,22 +945,22 @@ static inline struct rq *__task_rq_lock(
}
/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(p->pi_lock)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
- local_irq_save(*flags);
+ raw_spin_lock_irqsave(&p->pi_lock, *flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- raw_spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
}
@@ -978,10 +970,13 @@ static void __task_rq_unlock(struct rq *
raw_spin_unlock(&rq->lock);
}
-static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
__releases(rq->lock)
+ __releases(p->pi_lock)
{
- raw_spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
/*
@@ -2178,6 +2173,11 @@ void set_task_cpu(struct task_struct *p,
*/
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+ lockdep_is_held(&task_rq(p)->lock)));
+#endif
#endif
trace_sched_migrate_task(p, new_cpu);
@@ -2273,7 +2273,7 @@ unsigned long wait_task_inactive(struct
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
/*
* If it changed from the expected state, bail out now.
@@ -2639,6 +2639,7 @@ static void __sched_fork(struct task_str
*/
void sched_fork(struct task_struct *p, int clone_flags)
{
+ unsigned long flags;
int cpu = get_cpu();
__sched_fork(p);
@@ -2689,9 +2690,9 @@ void sched_fork(struct task_struct *p, i
*
* Silence PROVE_RCU.
*/
- rcu_read_lock();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
set_task_cpu(p, cpu);
- rcu_read_unlock();
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
@@ -2740,7 +2741,7 @@ void wake_up_new_task(struct task_struct
set_task_cpu(p, cpu);
p->state = TASK_RUNNING;
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
#endif
rq = task_rq_lock(p, &flags);
@@ -2751,7 +2752,7 @@ void wake_up_new_task(struct task_struct
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
#endif
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
put_cpu();
}
@@ -3476,12 +3477,12 @@ void sched_exec(void)
likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
struct migration_arg arg = { p, dest_cpu };
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
return;
}
unlock:
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
}
#endif
@@ -3518,7 +3519,7 @@ unsigned long long task_delta_exec(struc
rq = task_rq_lock(p, &flags);
ns = do_task_delta_exec(p, rq);
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
return ns;
}
@@ -3536,7 +3537,7 @@ unsigned long long task_sched_runtime(st
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
return ns;
}
@@ -3560,7 +3561,7 @@ unsigned long long thread_group_sched_ru
rq = task_rq_lock(p, &flags);
thread_group_cputime(p, &totals);
ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
return ns;
}
@@ -4675,16 +4676,13 @@ EXPORT_SYMBOL(sleep_on_timeout);
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
- unsigned long flags;
int oldprio, on_rq, running;
struct rq *rq;
const struct sched_class *prev_class;
BUG_ON(prio < 0 || prio > MAX_PRIO);
- lockdep_assert_held(&p->pi_lock);
-
- rq = task_rq_lock(p, &flags);
+ rq = __task_rq_lock(p);
trace_sched_pi_setprio(p, prio);
oldprio = p->prio;
@@ -4709,7 +4707,7 @@ void rt_mutex_setprio(struct task_struct
enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
check_class_changed(rq, p, prev_class, oldprio);
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
}
#endif
@@ -4757,7 +4755,7 @@ void set_user_nice(struct task_struct *p
resched_task(rq->curr);
}
out_unlock:
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
@@ -4979,20 +4977,17 @@ static int __sched_setscheduler(struct t
/*
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
- */
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- /*
+ *
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
*/
- rq = __task_rq_lock(p);
+ rq = task_rq_lock(p, &flags);
/*
* Changing the policy of the stop threads its a very bad idea
*/
if (p == rq->stop) {
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
return -EINVAL;
}
@@ -5005,8 +5000,7 @@ static int __sched_setscheduler(struct t
if (rt_bandwidth_enabled() && rt_policy(policy) &&
task_group(p)->rt_bandwidth.rt_runtime == 0 &&
!task_group_is_autogroup(task_group(p))) {
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
return -EPERM;
}
}
@@ -5015,8 +5009,7 @@ static int __sched_setscheduler(struct t
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
goto recheck;
}
on_rq = p->on_rq;
@@ -5038,8 +5031,7 @@ static int __sched_setscheduler(struct t
activate_task(rq, p, 0);
check_class_changed(rq, p, prev_class, oldprio);
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
rt_mutex_adjust_pi(p);
@@ -5620,7 +5612,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
rq = task_rq_lock(p, &flags);
time_slice = p->sched_class->get_rr_interval(rq, p);
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, p, &flags);
rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
@@ -5843,8 +5835,7 @@ int set_cpus_allowed_ptr(struct task_str
unsigned int dest_cpu;
int ret = 0;
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- rq = __task_rq_lock(p);
+ rq = task_rq_lock(p, &flags);
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
@@ -5872,15 +5863,13 @@ int set_cpus_allowed_ptr(struct task_str
if (need_migrate_task(p)) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0;
}
out:
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
return ret;
}
@@ -5908,6 +5897,7 @@ static int __migrate_task(struct task_st
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
+ raw_spin_lock(&p->pi_lock);
double_rq_lock(rq_src, rq_dest);
/* Already moved. */
if (task_cpu(p) != src_cpu)
@@ -5930,6 +5920,7 @@ static int __migrate_task(struct task_st
ret = 1;
fail:
double_rq_unlock(rq_src, rq_dest);
+ raw_spin_unlock(&p->pi_lock);
return ret;
}
@@ -8656,7 +8647,7 @@ void sched_move_task(struct task_struct
if (on_rq)
enqueue_task(rq, tsk, 0);
- task_rq_unlock(rq, &flags);
+ task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
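
Most of the diff above is a mechanical calling-convention change:
task_rq_unlock() now needs the task passed in so it can also release
p->pi_lock. A kernel-style sketch of a typical caller before and
after this patch (the example_* functions are hypothetical; only the
lock helpers are the ones modified above):

/* Before: task_rq_lock() took only rq->lock (with IRQs disabled). */
static void example_before(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq = task_rq_lock(p, &flags);

	/* ... */
	task_rq_unlock(rq, &flags);
}

/* After: p->pi_lock is acquired first; unlock needs @p to drop it. */
static void example_after(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq = task_rq_lock(p, &flags);

	/* task_rq(p) is stable here: migration needs one of the locks */
	task_rq_unlock(rq, p, &flags);
}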