From: tip-bot for Morten Rasmussen <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: mingo@redhat.com, torvalds@linux-foundation.org,
peterz@infradead.org, hpa@zytor.com,
linux-kernel@vger.kernel.org, mingo@kernel.org, efault@gmx.de,
riel@redhat.com, morten.rasmussen@arm.com, tglx@linutronix.de
Subject: [tip:sched/core] sched/fair: Make the use of prev_cpu consistent in the wakeup path
Date: Wed, 10 Aug 2016 11:03:32 -0700 [thread overview]
Message-ID: <tip-772bd008cd9a1d4e8ce566f2edcc61d1c28fcbe5@git.kernel.org> (raw)
In-Reply-To: <1466615004-3503-3-git-send-email-morten.rasmussen@arm.com>
Commit-ID: 772bd008cd9a1d4e8ce566f2edcc61d1c28fcbe5
Gitweb: http://git.kernel.org/tip/772bd008cd9a1d4e8ce566f2edcc61d1c28fcbe5
Author: Morten Rasmussen <morten.rasmussen@arm.com>
AuthorDate: Wed, 22 Jun 2016 18:03:13 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 10 Aug 2016 14:03:32 +0200
sched/fair: Make the use of prev_cpu consistent in the wakeup path
In commit:
ac66f5477239 ("sched/numa: Introduce migrate_swap()")
select_task_rq() got a 'cpu' argument to enable overriding of prev_cpu
in special cases (NUMA task swapping).
However, the select_task_rq_fair() helper functions: wake_affine() and
select_idle_sibling(), still use task_cpu(p) directly to work out
prev_cpu, which leads to inconsistencies.
This patch passes prev_cpu (potentially overridden by NUMA code) into
the helper functions to ensure prev_cpu is indeed the same CPU
everywhere in the wakeup path.
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: linux-kernel@vger.kernel.org
Cc: mgalbraith@suse.de
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1466615004-3503-3-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/fair.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9f9a4e5..d819da6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -656,7 +656,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
-static int select_idle_sibling(struct task_struct *p, int cpu);
+static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
/*
@@ -1512,7 +1512,8 @@ balance:
* Call select_idle_sibling to maybe find a better one.
*/
if (!cur)
- env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+ env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
+ env->dst_cpu);
assign:
task_numa_assign(env, cur, imp);
@@ -5101,18 +5102,18 @@ static int wake_wide(struct task_struct *p)
return 1;
}
-static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
+static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ int prev_cpu, int sync)
{
s64 this_load, load;
s64 this_eff_load, prev_eff_load;
- int idx, this_cpu, prev_cpu;
+ int idx, this_cpu;
struct task_group *tg;
unsigned long weight;
int balanced;
idx = sd->wake_idx;
this_cpu = smp_processor_id();
- prev_cpu = task_cpu(p);
load = source_load(prev_cpu, idx);
this_load = target_load(this_cpu, idx);
@@ -5277,11 +5278,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
/*
* Try and locate an idle CPU in the sched_domain.
*/
-static int select_idle_sibling(struct task_struct *p, int target)
+static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
struct sched_group *sg;
- int i = task_cpu(p);
if (idle_cpu(target))
return target;
@@ -5289,8 +5289,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
/*
* If the previous CPU is cache affine and idle, don't be stupid.
*/
- if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
- return i;
+ if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
+ return prev;
/*
* Otherwise, iterate the domains and find an eligible idle cpu.
@@ -5311,6 +5311,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
for_each_lower_domain(sd) {
sg = sd->groups;
do {
+ int i;
+
if (!cpumask_intersects(sched_group_cpus(sg),
tsk_cpus_allowed(p)))
goto next;
@@ -5419,13 +5421,13 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (affine_sd) {
sd = NULL; /* Prefer wake_affine over balance flags */
- if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+ if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
new_cpu = cpu;
}
if (!sd) {
if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
- new_cpu = select_idle_sibling(p, new_cpu);
+ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
} else while (sd) {
struct sched_group *group;
next prev parent reply other threads:[~2016-08-10 18:04 UTC|newest]
Thread overview: 64+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-22 17:03 [PATCH v2 00/13] sched: Clean-ups and asymmetric cpu capacity support Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 01/13] sched: Fix power to capacity renaming in comment Morten Rasmussen
2016-08-10 18:03 ` [tip:sched/core] sched/core: " tip-bot for Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 02/13] sched/fair: Consistent use of prev_cpu in wakeup path Morten Rasmussen
2016-06-22 18:04 ` Rik van Riel
2016-06-23 9:56 ` Morten Rasmussen
2016-06-23 12:24 ` Rik van Riel
2016-08-10 18:03 ` tip-bot for Morten Rasmussen [this message]
2016-06-22 17:03 ` [PATCH v2 03/13] sched/fair: Optimize find_idlest_cpu() when there is no choice Morten Rasmussen
2016-07-13 12:20 ` Vincent Guittot
2016-08-10 18:03 ` [tip:sched/core] " tip-bot for Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 04/13] sched: Introduce SD_ASYM_CPUCAPACITY sched_domain topology flag Morten Rasmussen
2016-07-11 9:55 ` Peter Zijlstra
2016-07-11 10:42 ` Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 05/13] sched: Enable SD_BALANCE_WAKE for asymmetric capacity systems Morten Rasmussen
2016-07-11 10:04 ` Peter Zijlstra
2016-07-11 10:37 ` Morten Rasmussen
2016-07-11 11:04 ` Morten Rasmussen
2016-07-11 11:24 ` Peter Zijlstra
2016-07-12 14:26 ` Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 06/13] sched: Store maximum per-cpu capacity in root domain Morten Rasmussen
2016-07-11 10:18 ` Peter Zijlstra
2016-07-11 16:16 ` Dietmar Eggemann
2016-07-12 11:42 ` Peter Zijlstra
2016-07-13 11:18 ` Dietmar Eggemann
2016-07-13 12:40 ` Vincent Guittot
2016-07-13 13:48 ` Dietmar Eggemann
2016-07-13 16:37 ` Morten Rasmussen
2016-07-14 13:25 ` Vincent Guittot
2016-07-14 15:15 ` Morten Rasmussen
2016-07-15 11:46 ` Morten Rasmussen
2016-07-15 13:39 ` Vincent Guittot
2016-07-15 16:02 ` Morten Rasmussen
2016-07-18 12:48 ` Vincent Guittot
2016-07-18 15:11 ` Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 07/13] sched/fair: Let asymmetric cpu configurations balance at wake-up Morten Rasmussen
2016-07-11 11:13 ` Peter Zijlstra
2016-07-11 12:32 ` Morten Rasmussen
2016-07-13 12:56 ` Vincent Guittot
2016-07-13 16:14 ` Morten Rasmussen
2016-07-14 13:45 ` Vincent Guittot
2016-07-15 8:37 ` Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 08/13] sched/fair: Compute task/cpu utilization at wake-up more correctly Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 09/13] sched/fair: Consider spare capacity in find_idlest_group() Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 10/13] sched: Add per-cpu max capacity to sched_group_capacity Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 11/13] sched/fair: Avoid pulling tasks from non-overloaded higher capacity groups Morten Rasmussen
2016-06-23 21:20 ` Sai Gurrappadi
2016-06-30 7:49 ` Morten Rasmussen
2016-07-14 16:39 ` Sai Gurrappadi
2016-07-15 8:39 ` Morten Rasmussen
2016-07-12 12:59 ` Peter Zijlstra
2016-07-12 14:34 ` Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 12/13] arm: Set SD_ASYM_CPUCAPACITY for big.LITTLE platforms Morten Rasmussen
2016-06-22 17:03 ` [PATCH v2 13/13] arm: Update arch_scale_cpu_capacity() to reflect change to define Morten Rasmussen
2016-06-28 10:20 ` [PATCH v2 00/13] sched: Clean-ups and asymmetric cpu capacity support Koan-Sin Tan
2016-06-30 7:53 ` Morten Rasmussen
2016-07-08 7:35 ` KEITA KOBAYASHI
2016-07-08 8:18 ` Morten Rasmussen
2016-07-11 8:33 ` Morten Rasmussen
2016-07-11 12:44 ` Vincent Guittot
2016-07-12 13:25 ` Peter Zijlstra
2016-07-12 14:39 ` Morten Rasmussen
2016-07-13 12:06 ` Vincent Guittot
2016-07-13 15:54 ` Morten Rasmussen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=tip-772bd008cd9a1d4e8ce566f2edcc61d1c28fcbe5@git.kernel.org \
--to=tipbot@zytor.com \
--cc=efault@gmx.de \
--cc=hpa@zytor.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-tip-commits@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=mingo@redhat.com \
--cc=morten.rasmussen@arm.com \
--cc=peterz@infradead.org \
--cc=riel@redhat.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).