From: Vincent Guittot <vincent.guittot@linaro.org>
To: peterz@infradead.org, mingo@kernel.org,
	linux-kernel@vger.kernel.org, preeti@linux.vnet.ibm.com,
	Morten.Rasmussen@arm.com, kamalesh@linux.vnet.ibm.com,
	linux-arm-kernel@lists.infradead.org
Cc: riel@redhat.com, efault@gmx.de, nicolas.pitre@linaro.org,
	linaro-kernel@lists.linaro.org,
	Vincent Guittot <vincent.guittot@linaro.org>
Subject: [PATCH v9 10/10] sched: move cfs task on a CPU with higher capacity
Date: Mon,  3 Nov 2014 17:54:47 +0100
Message-ID: <1415033687-23294-11-git-send-email-vincent.guittot@linaro.org>
In-Reply-To: <1415033687-23294-1-git-send-email-vincent.guittot@linaro.org>

When a CPU is used to handle a lot of IRQs or some RT tasks, the remaining
capacity for CFS tasks can be significantly reduced. Once we detect such a
situation by comparing cpu_capacity_orig and cpu_capacity, we trigger an idle
load balance to check whether it's worth moving its tasks to an idle CPU.
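
For reference, the comparison behind that detection is the one done by the
check_cpu_capacity() helper introduced earlier in this series and used below
in nohz_kick_needed. A minimal sketch of it (the body shown here is
illustrative):

	/*
	 * Return true if the capacity left for CFS on this rq has
	 * dropped noticeably below the original capacity, using the
	 * domain's imbalance_pct (e.g. 117 for a ~17% margin) as the
	 * threshold. The cross multiplication avoids a division.
	 */
	static inline int
	check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
	{
		return ((rq->cpu_capacity * sd->imbalance_pct) <
			(rq->cpu_capacity_orig * 100));
	}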

Once the idle load balance has selected the busiest CPU, it will attempt an
active load balance in only two cases:
- there is only 1 task on the busiest CPU.
- we haven't been able to move a task from the busiest rq.

A CPU with reduced capacity is covered by the 1st case, and it's worth
actively migrating its task if the idle CPU has full capacity. This test has
been added in need_active_balance.
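
For illustration (the numbers below are made up), assume imbalance_pct = 117
and that IRQ/RT activity has cut the busiest CPU's capacity to 430 out of an
original 1024, while the idle dst CPU keeps its full 1024:

	src_eff_capacity = 117 * 430  * 1024;	/* imbalance_pct * capacity_of(src) * capacity_orig_of(dst) */
	dst_eff_capacity = 100 * 1024 * 1024;	/* 100 * capacity_of(dst) * capacity_orig_of(src) */

Since 117 * 430 = 50310 < 100 * 1024 = 102400, src_eff_capacity is the
smaller one and need_active_balance returns 1: the lone task gets actively
migrated. The cross multiplication compares the two capacity/capacity_orig
ratios without resorting to integer division.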

As a side note, this will not generate more spurious ILBs because we already
trigger an ILB if there is more than 1 busy CPU. If this CPU is the only busy
one, we will trigger the ILB once to migrate the task.

The nohz_kick_needed function has been cleaned up a bit while adding the new
test.
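
With the new test added, the kick decision reads roughly as the following
sequence (a sketch of the control flow, not the literal code):

	1. rq->nr_running >= 2                                 -> kick
	2. the sd_busy group has more than one busy cpu        -> kick
	3. rq has at least one CFS task and
	   check_cpu_capacity(rq, sd) is true (the new test)   -> kick
	4. SD_ASYM_PACKING and a lower-numbered cpu in the
	   domain span is idle                                 -> kick
	otherwise                                              -> no kick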

env.src_cpu and env.src_rq must be set unconditionally because they are used
in need_active_balance, which is called even if busiest->nr_running equals 1.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 74 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db392a6..02e8f7f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6634,6 +6634,28 @@ static int need_active_balance(struct lb_env *env)
 			return 1;
 	}
 
+	/*
+	 * The dst_cpu is idle and the src CPU has only 1 CFS task.
+	 * It's worth migrating the task if the src_cpu's capacity is reduced
+	 * because of other sched_class or IRQs whereas capacity stays
+	 * available on dst_cpu.
+	 */
+	if ((env->idle != CPU_NOT_IDLE) &&
+			(env->src_rq->cfs.h_nr_running == 1)) {
+		unsigned long src_eff_capacity, dst_eff_capacity;
+
+		dst_eff_capacity = 100;
+		dst_eff_capacity *= capacity_of(env->dst_cpu);
+		dst_eff_capacity *= capacity_orig_of(env->src_cpu);
+
+		src_eff_capacity = sd->imbalance_pct;
+		src_eff_capacity *= capacity_of(env->src_cpu);
+		src_eff_capacity *= capacity_orig_of(env->dst_cpu);
+
+		if (src_eff_capacity < dst_eff_capacity)
+			return 1;
+	}
+
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -6733,6 +6755,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
+	env.src_cpu = busiest->cpu;
+	env.src_rq = busiest;
+
 	ld_moved = 0;
 	if (busiest->nr_running > 1) {
 		/*
@@ -6742,8 +6767,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.src_cpu   = busiest->cpu;
-		env.src_rq    = busiest;
 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
@@ -7443,22 +7466,25 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 /*
  * Current heuristic for kicking the idle load balancer in the presence
- * of an idle cpu is the system.
+ * of an idle cpu in the system.
  *   - This rq has more than one task.
- *   - At any scheduler domain level, this cpu's scheduler group has multiple
- *     busy cpu's exceeding the group's capacity.
+ *   - This rq has at least one CFS task and the capacity of the CPU is
+ *     significantly reduced because of RT tasks or IRQs.
+ *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
+ *     group has multiple busy cpus.
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
-static inline int nohz_kick_needed(struct rq *rq)
+static inline bool nohz_kick_needed(struct rq *rq)
 {
 	unsigned long now = jiffies;
 	struct sched_domain *sd;
 	struct sched_group_capacity *sgc;
 	int nr_busy, cpu = rq->cpu;
+	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
-		return 0;
+		return false;
 
        /*
 	* We may be recently in ticked or tickless idle mode. At the first
@@ -7472,38 +7498,44 @@ static inline int nohz_kick_needed(struct rq *rq)
 	 * balancing.
 	 */
 	if (likely(!atomic_read(&nohz.nr_cpus)))
-		return 0;
+		return false;
 
 	if (time_before(now, nohz.next_balance))
-		return 0;
+		return false;
 
 	if (rq->nr_running >= 2)
-		goto need_kick;
+		return true;
 
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
-
 	if (sd) {
 		sgc = sd->groups->sgc;
 		nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
-		if (nr_busy > 1)
-			goto need_kick_unlock;
+		if (nr_busy > 1) {
+			kick = true;
+			goto unlock;
+		}
+
 	}
 
-	sd = rcu_dereference(per_cpu(sd_asym, cpu));
+	sd = rcu_dereference(rq->sd);
+	if (sd) {
+		if ((rq->cfs.h_nr_running >= 1) &&
+				check_cpu_capacity(rq, sd)) {
+			kick = true;
+			goto unlock;
+		}
+	}
 
+	sd = rcu_dereference(per_cpu(sd_asym, cpu));
 	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
 				  sched_domain_span(sd)) < cpu))
-		goto need_kick_unlock;
+		kick = true;
 
+unlock:
 	rcu_read_unlock();
-	return 0;
-
-need_kick_unlock:
-	rcu_read_unlock();
-need_kick:
-	return 1;
+	return kick;
 }
 #else
 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
-- 
1.9.1

