* [PATCH 1/7] sched: Remove rq->cpu_load[] update code
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 16:09 ` Rik van Riel
2019-06-03 13:02 ` [tip:sched/core] sched/fair: Remove the " tip-bot for Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
` (5 subsequent siblings)
6 siblings, 2 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
With LB_BIAS disabled, there is no need to update the rq->cpu_load[idx]
any more.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
include/linux/sched/nohz.h | 8 --
kernel/sched/core.c | 1 -
kernel/sched/fair.c | 255 -------------------------------------
kernel/sched/sched.h | 6 -
kernel/time/tick-sched.c | 2 -
5 files changed, 272 deletions(-)
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index b36f4cf38111..1abe91ff6e4a 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -6,14 +6,6 @@
* This is the interface between the scheduler and nohz/dynticks:
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void cpu_load_update_nohz_start(void);
-extern void cpu_load_update_nohz_stop(void);
-#else
-static inline void cpu_load_update_nohz_start(void) { }
-static inline void cpu_load_update_nohz_stop(void) { }
-#endif
-
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern int get_nohz_timer_target(void);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 102dfcf0a29a..e8bee37b78fd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3032,7 +3032,6 @@ void scheduler_tick(void)
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
- cpu_load_update_active(rq);
calc_global_load_tick(rq);
psi_task_tick(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f35930f5e528..f619b93ca331 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5325,71 +5325,6 @@ DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
#ifdef CONFIG_NO_HZ_COMMON
-/*
- * per rq 'load' arrray crap; XXX kill this.
- */
-
-/*
- * The exact cpuload calculated at every tick would be:
- *
- * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
- *
- * If a CPU misses updates for n ticks (as it was idle) and update gets
- * called on the n+1-th tick when CPU may be busy, then we have:
- *
- * load_n = (1 - 1/2^i)^n * load_0
- * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- *
- * load' = (1 - 1/2^i)^n * load
- *
- * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
- * This allows us to precompute the above in said factors, thereby allowing the
- * reduction of an arbitrary n in O(log_2 n) steps. (See also
- * fixed_power_int())
- *
- * The calculation is approximated on a 128 point scale.
- */
-#define DEGRADE_SHIFT 7
-
-static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
-static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 64, 32, 8, 0, 0, 0, 0, 0 },
- { 96, 72, 40, 12, 1, 0, 0, 0 },
- { 112, 98, 75, 43, 15, 1, 0, 0 },
- { 120, 112, 98, 76, 45, 16, 2, 0 }
-};
-
-/*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
-static unsigned long
-decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
-{
- int j = 0;
-
- if (!missed_updates)
- return load;
-
- if (missed_updates >= degrade_zero_ticks[idx])
- return 0;
-
- if (idx == 1)
- return load >> missed_updates;
-
- while (missed_updates) {
- if (missed_updates % 2)
- load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
-
- missed_updates >>= 1;
- j++;
- }
- return load;
-}
static struct {
cpumask_var_t idle_cpus_mask;
@@ -5401,201 +5336,12 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-/**
- * __cpu_load_update - update the rq->cpu_load[] statistics
- * @this_rq: The rq to update statistics for
- * @this_load: The current load
- * @pending_updates: The number of missed updates
- *
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC).
- *
- * This function computes a decaying average:
- *
- * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
- *
- * Because of NOHZ it might not get called on every tick which gives need for
- * the @pending_updates argument.
- *
- * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
- * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
- * = A * (A * load[i]_n-2 + B) + B
- * = A * (A * (A * load[i]_n-3 + B) + B) + B
- * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
- * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
- * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
- * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
- *
- * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
- * any change in load would have resulted in the tick being turned back on.
- *
- * For regular NOHZ, this reduces to:
- *
- * load[i]_n = (1 - 1/2^i)^n * load[i]_0
- *
- * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
- * term.
- */
-static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
- unsigned long pending_updates)
-{
- unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
- int i, scale;
-
- this_rq->nr_load_updates++;
-
- /* Update our load: */
- this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
- for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
- unsigned long old_load, new_load;
-
- /* scale is effectively 1 << i now, and >> i divides by scale */
-
- old_load = this_rq->cpu_load[i];
-#ifdef CONFIG_NO_HZ_COMMON
- old_load = decay_load_missed(old_load, pending_updates - 1, i);
- if (tickless_load) {
- old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
- /*
- * old_load can never be a negative value because a
- * decayed tickless_load cannot be greater than the
- * original tickless_load.
- */
- old_load += tickless_load;
- }
-#endif
- new_load = this_load;
- /*
- * Round up the averaging division if load is increasing. This
- * prevents us from getting stuck on 9 if the load is 10, for
- * example.
- */
- if (new_load > old_load)
- new_load += scale - 1;
-
- this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
- }
-}
-
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we need to avoid the delta approach from the regular tick when
- * possible since that would seriously skew the load calculation. This is why we
- * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
- * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
- * loop exit, nohz_idle_balance, nohz full exit...)
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-static void cpu_load_update_nohz(struct rq *this_rq,
- unsigned long curr_jiffies,
- unsigned long load)
-{
- unsigned long pending_updates;
-
- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
- if (pending_updates) {
- this_rq->last_load_update_tick = curr_jiffies;
- /*
- * In the regular NOHZ case, we were idle, this means load 0.
- * In the NOHZ_FULL case, we were non-idle, we should consider
- * its weighted load.
- */
- cpu_load_update(this_rq, load, pending_updates);
- }
-}
-
-/*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
-static void cpu_load_update_idle(struct rq *this_rq)
-{
- /*
- * bail if there's load or we're actually up-to-date.
- */
- if (weighted_cpuload(this_rq))
- return;
-
- cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
-}
-
-/*
- * Record CPU load on nohz entry so we know the tickless load to account
- * on nohz exit. cpu_load[0] happens then to be updated more frequently
- * than other cpu_load[idx] but it should be fine as cpu_load readers
- * shouldn't rely into synchronized cpu_load[*] updates.
- */
-void cpu_load_update_nohz_start(void)
-{
- struct rq *this_rq = this_rq();
-
- /*
- * This is all lockless but should be fine. If weighted_cpuload changes
- * concurrently we'll exit nohz. And cpu_load write can race with
- * cpu_load_update_idle() but both updater would be writing the same.
- */
- this_rq->cpu_load[0] = weighted_cpuload(this_rq);
-}
-
-/*
- * Account the tickless load in the end of a nohz frame.
- */
-void cpu_load_update_nohz_stop(void)
-{
- unsigned long curr_jiffies = READ_ONCE(jiffies);
- struct rq *this_rq = this_rq();
- unsigned long load;
- struct rq_flags rf;
-
- if (curr_jiffies == this_rq->last_load_update_tick)
- return;
-
- load = weighted_cpuload(this_rq);
- rq_lock(this_rq, &rf);
- update_rq_clock(this_rq);
- cpu_load_update_nohz(this_rq, curr_jiffies, load);
- rq_unlock(this_rq, &rf);
-}
-#else /* !CONFIG_NO_HZ_COMMON */
-static inline void cpu_load_update_nohz(struct rq *this_rq,
- unsigned long curr_jiffies,
- unsigned long load) { }
-#endif /* CONFIG_NO_HZ_COMMON */
-
-static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
-{
-#ifdef CONFIG_NO_HZ_COMMON
- /* See the mess around cpu_load_update_nohz(). */
- this_rq->last_load_update_tick = READ_ONCE(jiffies);
-#endif
- cpu_load_update(this_rq, load, 1);
-}
-
-/*
- * Called from scheduler_tick()
- */
-void cpu_load_update_active(struct rq *this_rq)
-{
- unsigned long load = weighted_cpuload(this_rq);
-
- if (tick_nohz_tick_stopped())
- cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
- else
- cpu_load_update_periodic(this_rq, load);
-}
-
/*
* Return a low guess at the load of a migration-source CPU weighted
* according to the scheduling class and "nice" value.
@@ -9879,7 +9625,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
- cpu_load_update_idle(rq);
rq_unlock_irqrestore(rq, &rf);
if (flags & NOHZ_BALANCE_KICK)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be..a83827eec1d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -96,12 +96,6 @@ extern atomic_long_t calc_load_tasks;
extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-#ifdef CONFIG_SMP
-extern void cpu_load_update_active(struct rq *this_rq);
-#else
-static inline void cpu_load_update_active(struct rq *this_rq) { }
-#endif
-
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f4ee1a3428ae..be9707f68024 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -782,7 +782,6 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
*/
if (!ts->tick_stopped) {
calc_load_nohz_start();
- cpu_load_update_nohz_start();
quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
@@ -829,7 +828,6 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
- cpu_load_update_nohz_stop();
/*
* Clear the timer idle flag, so we avoid IPIs on remote queueing and
--
2.17.1
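
For reference, the removed decay_load_missed() evaluates load' = (1 - 1/2^i)^n * load by decomposing n into power-of-two factors, as the comment block in the hunk above describes. A minimal user-space sketch of that calculation, reusing the degrade_factor table quoted above (the includes and the main() driver are illustrative additions, not kernel code):

#include <stdio.h>

#define DEGRADE_SHIFT		7
#define CPU_LOAD_IDX_MAX	5

/* degrade_factor[i][j] ~= 128 * (1 - 1/2^i)^(2^j), same table as the removed code */
static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] = { 0, 8, 32, 64, 128 };
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
	{   0,   0,  0,  0,  0,  0, 0, 0 },
	{  64,  32,  8,  0,  0,  0, 0, 0 },
	{  96,  72, 40, 12,  1,  0, 0, 0 },
	{ 112,  98, 75, 43, 15,  1, 0, 0 },
	{ 120, 112, 98, 76, 45, 16, 2, 0 },
};

/* load' = (1 - 1/2^idx)^missed * load, reduced in O(log2 missed) steps */
static unsigned long decay_load_missed(unsigned long load, unsigned long missed, int idx)
{
	int j = 0;

	if (!missed)
		return load;
	if (missed >= degrade_zero_ticks[idx])	/* decayed to (almost) nothing */
		return 0;
	if (idx == 1)				/* (1 - 1/2)^n is just 1/2^n */
		return load >> missed;

	while (missed) {			/* binary decomposition of 'missed' */
		if (missed % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
		missed >>= 1;
		j++;
	}
	return load;
}

int main(void)
{
	/* cpu_load[2] = 1024 after 5 missed ticks: prints 240
	 * (exact value would be 1024 * (3/4)^5 = 243; the table is a 128-point approximation) */
	printf("%lu\n", decay_load_missed(1024, 5, 2));
	return 0;
}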
* Re: [PATCH 1/7] sched: Remove rq->cpu_load[] update code
2019-05-27 6:21 ` [PATCH 1/7] sched: Remove rq->cpu_load[] update code Dietmar Eggemann
@ 2019-05-27 16:09 ` Rik van Riel
2019-06-03 13:02 ` [tip:sched/core] sched/fair: Remove the " tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:09 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> With LB_BIAS disabled, there is no need to update the rq-
> >cpu_load[idx]
> any more.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
* [tip:sched/core] sched/fair: Remove the rq->cpu_load[] update code
2019-05-27 6:21 ` [PATCH 1/7] sched: Remove rq->cpu_load[] update code Dietmar Eggemann
2019-05-27 16:09 ` Rik van Riel
@ 2019-06-03 13:02 ` tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:02 UTC (permalink / raw)
To: linux-tip-commits
Cc: fweisbec, mingo, patrick.bellasi, hpa, torvalds, vincent.guittot,
valentin.schneider, tglx, morten.rasmussen, peterz,
dietmar.eggemann, quentin.perret, linux-kernel, riel
Commit-ID: 5e83eafbfd3b351537c0d74467fc43e8a88f4ae4
Gitweb: https://git.kernel.org/tip/5e83eafbfd3b351537c0d74467fc43e8a88f4ae4
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:10 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:38 +0200
sched/fair: Remove the rq->cpu_load[] update code
With LB_BIAS disabled, there is no need to update the rq->cpu_load[idx]
any more.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190527062116.11512-2-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
include/linux/sched/nohz.h | 8 --
kernel/sched/core.c | 1 -
kernel/sched/fair.c | 255 ---------------------------------------------
kernel/sched/sched.h | 6 --
kernel/time/tick-sched.c | 2 -
5 files changed, 272 deletions(-)
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index b36f4cf38111..1abe91ff6e4a 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -6,14 +6,6 @@
* This is the interface between the scheduler and nohz/dynticks:
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void cpu_load_update_nohz_start(void);
-extern void cpu_load_update_nohz_stop(void);
-#else
-static inline void cpu_load_update_nohz_start(void) { }
-static inline void cpu_load_update_nohz_stop(void) { }
-#endif
-
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern int get_nohz_timer_target(void);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 93ab85f0d076..00b8966802a8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3033,7 +3033,6 @@ void scheduler_tick(void)
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
- cpu_load_update_active(rq);
calc_global_load_tick(rq);
psi_task_tick(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 08b1cb06f968..1aab323f1b4b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5322,71 +5322,6 @@ DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
#ifdef CONFIG_NO_HZ_COMMON
-/*
- * per rq 'load' arrray crap; XXX kill this.
- */
-
-/*
- * The exact cpuload calculated at every tick would be:
- *
- * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
- *
- * If a CPU misses updates for n ticks (as it was idle) and update gets
- * called on the n+1-th tick when CPU may be busy, then we have:
- *
- * load_n = (1 - 1/2^i)^n * load_0
- * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- *
- * load' = (1 - 1/2^i)^n * load
- *
- * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
- * This allows us to precompute the above in said factors, thereby allowing the
- * reduction of an arbitrary n in O(log_2 n) steps. (See also
- * fixed_power_int())
- *
- * The calculation is approximated on a 128 point scale.
- */
-#define DEGRADE_SHIFT 7
-
-static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
-static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 64, 32, 8, 0, 0, 0, 0, 0 },
- { 96, 72, 40, 12, 1, 0, 0, 0 },
- { 112, 98, 75, 43, 15, 1, 0, 0 },
- { 120, 112, 98, 76, 45, 16, 2, 0 }
-};
-
-/*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
-static unsigned long
-decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
-{
- int j = 0;
-
- if (!missed_updates)
- return load;
-
- if (missed_updates >= degrade_zero_ticks[idx])
- return 0;
-
- if (idx == 1)
- return load >> missed_updates;
-
- while (missed_updates) {
- if (missed_updates % 2)
- load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
-
- missed_updates >>= 1;
- j++;
- }
- return load;
-}
static struct {
cpumask_var_t idle_cpus_mask;
@@ -5398,201 +5333,12 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-/**
- * __cpu_load_update - update the rq->cpu_load[] statistics
- * @this_rq: The rq to update statistics for
- * @this_load: The current load
- * @pending_updates: The number of missed updates
- *
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC).
- *
- * This function computes a decaying average:
- *
- * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
- *
- * Because of NOHZ it might not get called on every tick which gives need for
- * the @pending_updates argument.
- *
- * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
- * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
- * = A * (A * load[i]_n-2 + B) + B
- * = A * (A * (A * load[i]_n-3 + B) + B) + B
- * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
- * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
- * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
- * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
- *
- * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
- * any change in load would have resulted in the tick being turned back on.
- *
- * For regular NOHZ, this reduces to:
- *
- * load[i]_n = (1 - 1/2^i)^n * load[i]_0
- *
- * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
- * term.
- */
-static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
- unsigned long pending_updates)
-{
- unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
- int i, scale;
-
- this_rq->nr_load_updates++;
-
- /* Update our load: */
- this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
- for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
- unsigned long old_load, new_load;
-
- /* scale is effectively 1 << i now, and >> i divides by scale */
-
- old_load = this_rq->cpu_load[i];
-#ifdef CONFIG_NO_HZ_COMMON
- old_load = decay_load_missed(old_load, pending_updates - 1, i);
- if (tickless_load) {
- old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
- /*
- * old_load can never be a negative value because a
- * decayed tickless_load cannot be greater than the
- * original tickless_load.
- */
- old_load += tickless_load;
- }
-#endif
- new_load = this_load;
- /*
- * Round up the averaging division if load is increasing. This
- * prevents us from getting stuck on 9 if the load is 10, for
- * example.
- */
- if (new_load > old_load)
- new_load += scale - 1;
-
- this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
- }
-}
-
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we need to avoid the delta approach from the regular tick when
- * possible since that would seriously skew the load calculation. This is why we
- * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
- * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
- * loop exit, nohz_idle_balance, nohz full exit...)
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-static void cpu_load_update_nohz(struct rq *this_rq,
- unsigned long curr_jiffies,
- unsigned long load)
-{
- unsigned long pending_updates;
-
- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
- if (pending_updates) {
- this_rq->last_load_update_tick = curr_jiffies;
- /*
- * In the regular NOHZ case, we were idle, this means load 0.
- * In the NOHZ_FULL case, we were non-idle, we should consider
- * its weighted load.
- */
- cpu_load_update(this_rq, load, pending_updates);
- }
-}
-
-/*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
-static void cpu_load_update_idle(struct rq *this_rq)
-{
- /*
- * bail if there's load or we're actually up-to-date.
- */
- if (weighted_cpuload(this_rq))
- return;
-
- cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
-}
-
-/*
- * Record CPU load on nohz entry so we know the tickless load to account
- * on nohz exit. cpu_load[0] happens then to be updated more frequently
- * than other cpu_load[idx] but it should be fine as cpu_load readers
- * shouldn't rely into synchronized cpu_load[*] updates.
- */
-void cpu_load_update_nohz_start(void)
-{
- struct rq *this_rq = this_rq();
-
- /*
- * This is all lockless but should be fine. If weighted_cpuload changes
- * concurrently we'll exit nohz. And cpu_load write can race with
- * cpu_load_update_idle() but both updater would be writing the same.
- */
- this_rq->cpu_load[0] = weighted_cpuload(this_rq);
-}
-
-/*
- * Account the tickless load in the end of a nohz frame.
- */
-void cpu_load_update_nohz_stop(void)
-{
- unsigned long curr_jiffies = READ_ONCE(jiffies);
- struct rq *this_rq = this_rq();
- unsigned long load;
- struct rq_flags rf;
-
- if (curr_jiffies == this_rq->last_load_update_tick)
- return;
-
- load = weighted_cpuload(this_rq);
- rq_lock(this_rq, &rf);
- update_rq_clock(this_rq);
- cpu_load_update_nohz(this_rq, curr_jiffies, load);
- rq_unlock(this_rq, &rf);
-}
-#else /* !CONFIG_NO_HZ_COMMON */
-static inline void cpu_load_update_nohz(struct rq *this_rq,
- unsigned long curr_jiffies,
- unsigned long load) { }
-#endif /* CONFIG_NO_HZ_COMMON */
-
-static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
-{
-#ifdef CONFIG_NO_HZ_COMMON
- /* See the mess around cpu_load_update_nohz(). */
- this_rq->last_load_update_tick = READ_ONCE(jiffies);
-#endif
- cpu_load_update(this_rq, load, 1);
-}
-
-/*
- * Called from scheduler_tick()
- */
-void cpu_load_update_active(struct rq *this_rq)
-{
- unsigned long load = weighted_cpuload(this_rq);
-
- if (tick_nohz_tick_stopped())
- cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
- else
- cpu_load_update_periodic(this_rq, load);
-}
-
/*
* Return a low guess at the load of a migration-source CPU weighted
* according to the scheduling class and "nice" value.
@@ -9876,7 +9622,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
- cpu_load_update_idle(rq);
rq_unlock_irqrestore(rq, &rf);
if (flags & NOHZ_BALANCE_KICK)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c308410675ed..3750b5e53792 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -96,12 +96,6 @@ extern atomic_long_t calc_load_tasks;
extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-#ifdef CONFIG_SMP
-extern void cpu_load_update_active(struct rq *this_rq);
-#else
-static inline void cpu_load_update_active(struct rq *this_rq) { }
-#endif
-
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f4ee1a3428ae..be9707f68024 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -782,7 +782,6 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
*/
if (!ts->tick_stopped) {
calc_load_nohz_start();
- cpu_load_update_nohz_start();
quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
@@ -829,7 +828,6 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
- cpu_load_update_nohz_stop();
/*
* Clear the timer idle flag, so we avoid IPIs on remote queueing and
* [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload()
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 1/7] sched: Remove rq->cpu_load[] update code Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 16:09 ` Rik van Riel
` (2 more replies)
2019-05-27 6:21 ` [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl Dietmar Eggemann
` (4 subsequent siblings)
6 siblings, 3 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
With LB_BIAS disabled, source_load() & target_load() return
weighted_cpuload(). Replace both with calls to weighted_cpuload().
The function to obtain the load index (sd->*_idx) for an sd,
get_sd_load_idx(), can be removed as well.
Finally, get rid of the sched feature LB_BIAS.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/fair.c | 90 ++---------------------------------------
kernel/sched/features.h | 1 -
2 files changed, 4 insertions(+), 87 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f619b93ca331..88779c45e8e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1467,8 +1467,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
}
static unsigned long weighted_cpuload(struct rq *rq);
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -5336,45 +5334,11 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
-/*
- * Return a low guess at the load of a migration-source CPU weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(rq);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target CPU weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(rq);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return max(rq->cpu_load[type-1], total);
-}
-
static unsigned long capacity_of(int cpu)
{
return cpu_rq(cpu)->cpu_capacity;
@@ -5482,7 +5446,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = target_load(this_cpu, sd->wake_idx);
+ this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5500,7 +5464,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+ prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5561,14 +5525,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
unsigned long this_runnable_load = ULONG_MAX;
unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
unsigned long most_spare = 0, this_spare = 0;
- int load_idx = sd->forkexec_idx;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
- if (sd_flag & SD_BALANCE_WAKE)
- load_idx = sd->wake_idx;
-
do {
unsigned long load, avg_load, runnable_load;
unsigned long spare_cap, max_spare_cap;
@@ -5592,12 +5552,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
- /* Bias balancing toward CPUs of our domain */
- if (local_group)
- load = source_load(i, load_idx);
- else
- load = target_load(i, load_idx);
-
+ load = weighted_cpuload(cpu_rq(i));
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -7679,34 +7634,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
};
}
-/**
- * get_sd_load_idx - Obtain the load index for a given sched domain.
- * @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The idle status of the CPU for whose sd load_idx is obtained.
- *
- * Return: The load index.
- */
-static inline int get_sd_load_idx(struct sched_domain *sd,
- enum cpu_idle_type idle)
-{
- int load_idx;
-
- switch (idle) {
- case CPU_NOT_IDLE:
- load_idx = sd->busy_idx;
- break;
-
- case CPU_NEWLY_IDLE:
- load_idx = sd->newidle_idx;
- break;
- default:
- load_idx = sd->idle_idx;
- break;
- }
-
- return load_idx;
-}
-
static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -7995,9 +7922,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
struct sg_lb_stats *sgs,
int *sg_status)
{
- int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
- int load_idx = get_sd_load_idx(env->sd, env->idle);
- unsigned long load;
int i, nr_running;
memset(sgs, 0, sizeof(*sgs));
@@ -8008,13 +7932,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- /* Bias balancing toward CPUs of our domain: */
- if (local_group)
- load = target_load(i, load_idx);
- else
- load = source_load(i, load_idx);
-
- sgs->group_load += load;
+ sgs->group_load += weighted_cpuload(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 858589b83377..2410db5e9a35 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -39,7 +39,6 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true)
SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, false)
/*
* Decrement CPU capacity based on time not spent running tasks
--
2.17.1
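
The reduction this patch relies on can be read off the removed helpers above: with the LB_BIAS sched feature hard-wired to false, the early return is always taken, the min()/max() clamp against rq->cpu_load[type-1] is dead code, and both helpers collapse to weighted_cpuload(). A small user-space model of that collapse (the struct and names are stand-ins for the kernel types, purely illustrative):

#include <stdio.h>
#include <stdbool.h>

struct rq_model {
	unsigned long weighted_load;	/* stands in for cfs_rq_runnable_load_avg() */
	unsigned long cpu_load[5];	/* the per-rq array being removed */
};

static const bool sched_feat_LB_BIAS = false;	/* LB_BIAS is disabled by default */

static unsigned long weighted_cpuload(const struct rq_model *rq)
{
	return rq->weighted_load;
}

static unsigned long source_load(const struct rq_model *rq, int type)
{
	unsigned long total = weighted_cpuload(rq);

	if (type == 0 || !sched_feat_LB_BIAS)	/* always taken with LB_BIAS off */
		return total;

	/* never reached: the biased guess against cpu_load[] is dead code */
	return rq->cpu_load[type - 1] < total ? rq->cpu_load[type - 1] : total;
}

int main(void)
{
	struct rq_model rq = { .weighted_load = 777, .cpu_load = { 1, 2, 3, 4, 5 } };

	/* Both calls print 777, so call sites can use weighted_cpuload() directly. */
	printf("%lu %lu\n", source_load(&rq, 3), weighted_cpuload(&rq));
	return 0;
}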
* Re: [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload()
2019-05-27 6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
@ 2019-05-27 16:09 ` Rik van Riel
2019-05-28 10:24 ` Dietmar Eggemann
2019-06-03 13:02 ` [tip:sched/core] sched/fair: Replace source_load() & target_load() with weighted_cpuload() tip-bot for Dietmar Eggemann
2 siblings, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:09 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> With LB_BIAS disabled, source_load() & target_load() return
> weighted_cpuload(). Replace both with calls to weighted_cpuload().
>
> The function to obtain the load index (sd->*_idx) for an sd,
> get_sd_load_idx(), can be removed as well.
>
> Finally, get rid of the sched feature LB_BIAS.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
* Re: [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload()
2019-05-27 6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
2019-05-27 16:09 ` Rik van Riel
@ 2019-05-28 10:24 ` Dietmar Eggemann
2019-05-28 10:53 ` Peter Zijlstra
2019-06-03 13:02 ` [tip:sched/core] sched/fair: Replace source_load() & target_load() with weighted_cpuload() tip-bot for Dietmar Eggemann
2 siblings, 1 reply; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-28 10:24 UTC (permalink / raw)
To: Hillf Danton
Cc: Peter Zijlstra, Ingo Molnar, Thomas Gleixner,
Frederic Weisbecker, Rik van Riel, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On 5/28/19 6:42 AM, Hillf Danton wrote:
>
> On Mon, 27 May 2019 07:21:11 +0100 Dietmar Eggemann wrote:
[...]
>> @@ -5500,7 +5464,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
>> this_eff_load *= 100;
>> this_eff_load *= capacity_of(prev_cpu);
>>
>> - prev_eff_load = source_load(prev_cpu, sd->wake_idx);
>> + prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> cpu_rq(prev_cpu)
>
> Seems we have no need to see this cpu's load more than once.
Thanks for catching this! Will fix it in v2.
* Re: [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload()
2019-05-28 10:24 ` Dietmar Eggemann
@ 2019-05-28 10:53 ` Peter Zijlstra
0 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2019-05-28 10:53 UTC (permalink / raw)
To: Dietmar Eggemann
Cc: Hillf Danton, Ingo Molnar, Thomas Gleixner, Frederic Weisbecker,
Rik van Riel, Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
On Tue, May 28, 2019 at 12:24:52PM +0200, Dietmar Eggemann wrote:
> On 5/28/19 6:42 AM, Hillf Danton wrote:
> >
> > On Mon, 27 May 2019 07:21:11 +0100 Dietmar Eggemann wrote:
>
> [...]
>
> > > @@ -5500,7 +5464,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
> > > this_eff_load *= 100;
> > > this_eff_load *= capacity_of(prev_cpu);
> > > - prev_eff_load = source_load(prev_cpu, sd->wake_idx);
> > > + prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> > cpu_rq(prev_cpu)
> >
> > Seems we have no need to see this cpu's load more than once.
>
> Thanks for catching this! Will fix it in v2.
Fixed :-)
* [tip:sched/core] sched/fair: Replace source_load() & target_load() with weighted_cpuload()
2019-05-27 6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
2019-05-27 16:09 ` Rik van Riel
2019-05-28 10:24 ` Dietmar Eggemann
@ 2019-06-03 13:02 ` tip-bot for Dietmar Eggemann
2 siblings, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:02 UTC (permalink / raw)
To: linux-tip-commits
Cc: morten.rasmussen, valentin.schneider, dietmar.eggemann, torvalds,
vincent.guittot, quentin.perret, fweisbec, hpa, patrick.bellasi,
mingo, riel, linux-kernel, peterz, tglx
Commit-ID: 1c1b8a7b03ef50f80f5d0c871ee261c04a6c967e
Gitweb: https://git.kernel.org/tip/1c1b8a7b03ef50f80f5d0c871ee261c04a6c967e
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:11 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:39 +0200
sched/fair: Replace source_load() & target_load() with weighted_cpuload()
With LB_BIAS disabled, source_load() & target_load() return
weighted_cpuload(). Replace both with calls to weighted_cpuload().
The function to obtain the load index (sd->*_idx) for an sd,
get_sd_load_idx(), can be removed as well.
Finally, get rid of the sched feature LB_BIAS.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190527062116.11512-3-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/fair.c | 90 +++----------------------------------------------
kernel/sched/features.h | 1 -
2 files changed, 4 insertions(+), 87 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1aab323f1b4b..5b9691e5ea59 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1467,8 +1467,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
}
static unsigned long weighted_cpuload(struct rq *rq);
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -5333,45 +5331,11 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
-/*
- * Return a low guess at the load of a migration-source CPU weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(rq);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target CPU weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(rq);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return max(rq->cpu_load[type-1], total);
-}
-
static unsigned long capacity_of(int cpu)
{
return cpu_rq(cpu)->cpu_capacity;
@@ -5479,7 +5443,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = target_load(this_cpu, sd->wake_idx);
+ this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5497,7 +5461,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+ prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5558,14 +5522,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
unsigned long this_runnable_load = ULONG_MAX;
unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
unsigned long most_spare = 0, this_spare = 0;
- int load_idx = sd->forkexec_idx;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
- if (sd_flag & SD_BALANCE_WAKE)
- load_idx = sd->wake_idx;
-
do {
unsigned long load, avg_load, runnable_load;
unsigned long spare_cap, max_spare_cap;
@@ -5589,12 +5549,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
- /* Bias balancing toward CPUs of our domain */
- if (local_group)
- load = source_load(i, load_idx);
- else
- load = target_load(i, load_idx);
-
+ load = weighted_cpuload(cpu_rq(i));
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -7676,34 +7631,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
};
}
-/**
- * get_sd_load_idx - Obtain the load index for a given sched domain.
- * @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The idle status of the CPU for whose sd load_idx is obtained.
- *
- * Return: The load index.
- */
-static inline int get_sd_load_idx(struct sched_domain *sd,
- enum cpu_idle_type idle)
-{
- int load_idx;
-
- switch (idle) {
- case CPU_NOT_IDLE:
- load_idx = sd->busy_idx;
- break;
-
- case CPU_NEWLY_IDLE:
- load_idx = sd->newidle_idx;
- break;
- default:
- load_idx = sd->idle_idx;
- break;
- }
-
- return load_idx;
-}
-
static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -7992,9 +7919,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
struct sg_lb_stats *sgs,
int *sg_status)
{
- int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
- int load_idx = get_sd_load_idx(env->sd, env->idle);
- unsigned long load;
int i, nr_running;
memset(sgs, 0, sizeof(*sgs));
@@ -8005,13 +7929,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- /* Bias balancing toward CPUs of our domain: */
- if (local_group)
- load = target_load(i, load_idx);
- else
- load = source_load(i, load_idx);
-
- sgs->group_load += load;
+ sgs->group_load += weighted_cpuload(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 858589b83377..2410db5e9a35 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -39,7 +39,6 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true)
SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, false)
/*
* Decrement CPU capacity based on time not spent running tasks
* [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 1/7] sched: Remove rq->cpu_load[] update code Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload() Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 16:10 ` Rik van Riel
2019-06-03 13:03 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 4/7] sched: Remove rq->cpu_load[] Dietmar Eggemann
` (3 subsequent siblings)
6 siblings, 2 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
This reverts commit 201c373e8e48 ("sched/debug: Limit sd->*_idx range on
sysctl").
Load indexes (sd->*_idx) are no longer needed without rq->cpu_load[].
The range check for load indexes can be removed as well. Get rid of it
before the rq->cpu_load[] since it uses CPU_LOAD_IDX_MAX.
At the same time, fix the following coding style issues detected by
scripts/checkpatch.pl:
ERROR: space prohibited before that ','
ERROR: space prohibited before that close parenthesis ')'
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/debug.c | 37 ++++++++++++++-----------------------
1 file changed, 14 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f..b070fb61caca 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -236,25 +236,16 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
*tablep = NULL;
}
-static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX-1;
-
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
- umode_t mode, proc_handler *proc_handler,
- bool load_idx)
+ umode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
entry->maxlen = maxlen;
entry->mode = mode;
entry->proc_handler = proc_handler;
-
- if (load_idx) {
- entry->extra1 = &min_load_idx;
- entry->extra2 = &max_load_idx;
- }
}
static struct ctl_table *
@@ -265,19 +256,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
if (table == NULL)
return NULL;
- set_table_entry(&table[0] , "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[1] , "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[2] , "busy_idx", &sd->busy_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[3] , "idle_idx", &sd->idle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[4] , "newidle_idx", &sd->newidle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[5] , "wake_idx", &sd->wake_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[6] , "forkexec_idx", &sd->forkexec_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[7] , "busy_factor", &sd->busy_factor, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[8] , "imbalance_pct", &sd->imbalance_pct, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[9] , "cache_nice_tries", &sd->cache_nice_tries, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[10], "flags", &sd->flags, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
/* &table[13] is terminator */
return table;
--
2.17.1
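
For context, the extra1/extra2 pointers removed here are what made proc_dointvec_minmax() clamp writes to the sd->*_idx sysctl files into the [0, CPU_LOAD_IDX_MAX-1] range; once the load indexes themselves are gone there is nothing left to clamp. A sketch of the old wiring as a standalone ctl_table entry (kernel-context fragment, names and values illustrative):

#include <linux/sysctl.h>

#define CPU_LOAD_IDX_MAX 5

static int busy_idx;				/* stand-in for sd->busy_idx */
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX - 1;	/* 4 */

static struct ctl_table busy_idx_entry[] = {
	{
		.procname	= "busy_idx",
		.data		= &busy_idx,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_load_idx,	/* writes below 0 are rejected */
		.extra2		= &max_load_idx,	/* writes above 4 are rejected */
	},
	{ }					/* table terminator */
};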
* Re: [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl
2019-05-27 6:21 ` [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl Dietmar Eggemann
@ 2019-05-27 16:10 ` Rik van Riel
2019-06-03 13:03 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:10 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> This reverts commit 201c373e8e48 ("sched/debug: Limit sd->*_idx range
> on
> sysctl").
>
> Load indexes (sd->*_idx) are no longer needed without rq->cpu_load[].
> The range check for load indexes can be removed as well. Get rid of
> it
> before the rq->cpu_load[] since it uses CPU_LOAD_IDX_MAX.
>
> At the same time, fix the following coding style issues detected by
> scripts/checkpatch.pl:
>
> ERROR: space prohibited before that ','
> ERROR: space prohibited before that close parenthesis ')'
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
--
All Rights Reversed.
* [tip:sched/core] sched/debug: Remove sd->*_idx range on sysctl
2019-05-27 6:21 ` [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl Dietmar Eggemann
2019-05-27 16:10 ` Rik van Riel
@ 2019-06-03 13:03 ` tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:03 UTC (permalink / raw)
To: linux-tip-commits
Cc: tglx, dietmar.eggemann, morten.rasmussen, hpa, patrick.bellasi,
peterz, riel, quentin.perret, torvalds, mingo, linux-kernel,
valentin.schneider, fweisbec, vincent.guittot
Commit-ID: 3d8d53554405952993bb0279ef3ebebc51740074
Gitweb: https://git.kernel.org/tip/3d8d53554405952993bb0279ef3ebebc51740074
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:12 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:39 +0200
sched/debug: Remove sd->*_idx range on sysctl
This reverts:
commit 201c373e8e48 ("sched/debug: Limit sd->*_idx range on sysctl")
Load indexes (sd->*_idx) are no longer needed without rq->cpu_load[].
The range check for load indexes can be removed as well. Get rid of it
before the rq->cpu_load[] since it uses CPU_LOAD_IDX_MAX.
At the same time, fix the following coding style issues detected by
scripts/checkpatch.pl:
ERROR: space prohibited before that ','
ERROR: space prohibited before that close parenthesis ')'
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190527062116.11512-4-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/debug.c | 37 ++++++++++++++-----------------------
1 file changed, 14 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 150043e1d716..5c7b066d7de6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -236,25 +236,16 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
*tablep = NULL;
}
-static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX-1;
-
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
- umode_t mode, proc_handler *proc_handler,
- bool load_idx)
+ umode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
entry->maxlen = maxlen;
entry->mode = mode;
entry->proc_handler = proc_handler;
-
- if (load_idx) {
- entry->extra1 = &min_load_idx;
- entry->extra2 = &max_load_idx;
- }
}
static struct ctl_table *
@@ -265,19 +256,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
if (table == NULL)
return NULL;
- set_table_entry(&table[0] , "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[1] , "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[2] , "busy_idx", &sd->busy_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[3] , "idle_idx", &sd->idle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[4] , "newidle_idx", &sd->newidle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[5] , "wake_idx", &sd->wake_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[6] , "forkexec_idx", &sd->forkexec_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
- set_table_entry(&table[7] , "busy_factor", &sd->busy_factor, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[8] , "imbalance_pct", &sd->imbalance_pct, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[9] , "cache_nice_tries", &sd->cache_nice_tries, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[10], "flags", &sd->flags, sizeof(int) , 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
/* &table[13] is terminator */
return table;
* [PATCH 4/7] sched: Remove rq->cpu_load[]
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
` (2 preceding siblings ...)
2019-05-27 6:21 ` [PATCH 3/7] sched/debug: Remove sd->*_idx range on sysctl Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 16:10 ` Rik van Riel
2019-06-03 13:04 ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 5/7] sched: Remove sd->*_idx Dietmar Eggemann
` (2 subsequent siblings)
6 siblings, 2 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
The per rq load array values also disappear from the cpu#X sections in
/proc/sched_debug.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/core.c | 6 +-----
kernel/sched/debug.c | 5 -----
kernel/sched/sched.h | 2 --
3 files changed, 1 insertion(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8bee37b78fd..068b2dd5e456 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5900,8 +5900,8 @@ DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
void __init sched_init(void)
{
- int i, j;
unsigned long alloc_size = 0, ptr;
+ int i;
wait_bit_init();
@@ -6003,10 +6003,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
-
- for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
- rq->cpu_load[j] = 0;
-
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index b070fb61caca..c2eeaa44b274 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,11 +656,6 @@ do { \
SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
PN(clock_task);
- P(cpu_load[0]);
- P(cpu_load[1]);
- P(cpu_load[2]);
- P(cpu_load[3]);
- P(cpu_load[4]);
#undef P
#undef PN
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a83827eec1d1..e43c29920fa8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -812,8 +812,6 @@ struct rq {
unsigned int nr_preferred_running;
unsigned int numa_migrate_on;
#endif
- #define CPU_LOAD_IDX_MAX 5
- unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
unsigned long last_load_update_tick;
--
2.17.1
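For illustration only: the five P(cpu_load[x]) lines removed by the debug.c
hunk above used to show up in each cpu#X section of /proc/sched_debug roughly
as follows (values invented, column width approximated from the SEQ_printf()
format visible in the context lines):

  .cpu_load[0]                   : 0
  .cpu_load[1]                   : 12
  .cpu_load[2]                   : 25
  .cpu_load[3]                   : 31
  .cpu_load[4]                   : 38

After the patch these lines are gone; clock, clock_task and the remaining rq
fields are printed as before.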
* Re: [PATCH 4/7] sched: Remove rq->cpu_load[]
2019-05-27 6:21 ` [PATCH 4/7] sched: Remove rq->cpu_load[] Dietmar Eggemann
@ 2019-05-27 16:10 ` Rik van Riel
2019-06-03 13:04 ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:10 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> The per rq load array values also disappear from the cpu#X sections
> in
> /proc/sched_debug.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
* [tip:sched/core] sched/core: Remove rq->cpu_load[]
2019-05-27 6:21 ` [PATCH 4/7] sched: Remove rq->cpu_load[] Dietmar Eggemann
2019-05-27 16:10 ` Rik van Riel
@ 2019-06-03 13:04 ` tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:04 UTC (permalink / raw)
To: linux-tip-commits
Cc: peterz, valentin.schneider, linux-kernel, vincent.guittot,
fweisbec, tglx, dietmar.eggemann, torvalds, mingo,
morten.rasmussen, hpa, quentin.perret, patrick.bellasi, riel
Commit-ID: 55627e3cd22c315c4a02fe3bbbb7234ec439cb1d
Gitweb: https://git.kernel.org/tip/55627e3cd22c315c4a02fe3bbbb7234ec439cb1d
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:13 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:40 +0200
sched/core: Remove rq->cpu_load[]
The per rq load array values also disappear from the cpu#X sections in
/proc/sched_debug.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190527062116.11512-5-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/core.c | 6 +-----
kernel/sched/debug.c | 5 -----
kernel/sched/sched.h | 2 --
3 files changed, 1 insertion(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 00b8966802a8..29984d8c41f0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5901,8 +5901,8 @@ DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
void __init sched_init(void)
{
- int i, j;
unsigned long alloc_size = 0, ptr;
+ int i;
wait_bit_init();
@@ -6004,10 +6004,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
-
- for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
- rq->cpu_load[j] = 0;
-
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5c7b066d7de6..a0b0d6e21e5b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -654,11 +654,6 @@ do { \
SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
PN(clock_task);
- P(cpu_load[0]);
- P(cpu_load[1]);
- P(cpu_load[2]);
- P(cpu_load[3]);
- P(cpu_load[4]);
#undef P
#undef PN
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3750b5e53792..607859a18b2a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -812,8 +812,6 @@ struct rq {
unsigned int nr_preferred_running;
unsigned int numa_migrate_on;
#endif
- #define CPU_LOAD_IDX_MAX 5
- unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
unsigned long last_load_update_tick;
* [PATCH 5/7] sched: Remove sd->*_idx
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
` (3 preceding siblings ...)
2019-05-27 6:21 ` [PATCH 4/7] sched: Remove rq->cpu_load[] Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 16:12 ` Rik van Riel
2019-06-03 13:04 ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
2019-05-27 6:21 ` [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load() Dietmar Eggemann
6 siblings, 2 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
The sched domain per rq load index files also disappear from the
/proc/sys/kernel/sched_domain/cpuX/domainY directories.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
include/linux/sched/topology.h | 5 -----
kernel/sched/debug.c | 25 ++++++++++---------------
kernel/sched/topology.c | 10 ----------
3 files changed, 10 insertions(+), 30 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cfc0a89a7159..53afbe07354a 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -84,11 +84,6 @@ struct sched_domain {
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int busy_idx;
- unsigned int idle_idx;
- unsigned int newidle_idx;
- unsigned int wake_idx;
- unsigned int forkexec_idx;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c2eeaa44b274..4d91c8e41962 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -251,25 +251,20 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
- struct ctl_table *table = sd_alloc_ctl_entry(14);
+ struct ctl_table *table = sd_alloc_ctl_entry(9);
if (table == NULL)
return NULL;
- set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[2], "busy_idx", &sd->busy_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[3], "idle_idx", &sd->idle_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[5], "wake_idx", &sd->wake_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[7], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
- /* &table[13] is terminator */
+ set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
+ /* &table[8] is terminator */
return table;
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index f53f89df837d..63184cf0d0d7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1344,11 +1344,6 @@ sd_init(struct sched_domain_topology_level *tl,
.imbalance_pct = 125,
.cache_nice_tries = 0,
- .busy_idx = 0,
- .idle_idx = 0,
- .newidle_idx = 0,
- .wake_idx = 0,
- .forkexec_idx = 0,
.flags = 1*SD_LOAD_BALANCE
| 1*SD_BALANCE_NEWIDLE
@@ -1400,13 +1395,10 @@ sd_init(struct sched_domain_topology_level *tl,
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
sd->imbalance_pct = 117;
sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
#ifdef CONFIG_NUMA
} else if (sd->flags & SD_NUMA) {
sd->cache_nice_tries = 2;
- sd->busy_idx = 3;
- sd->idle_idx = 2;
sd->flags &= ~SD_PREFER_SIBLING;
sd->flags |= SD_SERIALIZE;
@@ -1419,8 +1411,6 @@ sd_init(struct sched_domain_topology_level *tl,
#endif
} else {
sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
- sd->idle_idx = 1;
}
/*
--
2.17.1
* Re: [PATCH 5/7] sched: Remove sd->*_idx
2019-05-27 6:21 ` [PATCH 5/7] sched: Remove sd->*_idx Dietmar Eggemann
@ 2019-05-27 16:12 ` Rik van Riel
2019-06-03 13:04 ` [tip:sched/core] sched/core: " tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:12 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> The sched domain per rq load index files also disappear from the
> /proc/sys/kernel/sched_domain/cpuX/domainY directories.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
* [tip:sched/core] sched/core: Remove sd->*_idx
2019-05-27 6:21 ` [PATCH 5/7] sched: Remove sd->*_idx Dietmar Eggemann
2019-05-27 16:12 ` Rik van Riel
@ 2019-06-03 13:04 ` tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:04 UTC (permalink / raw)
To: linux-tip-commits
Cc: morten.rasmussen, torvalds, linux-kernel, quentin.perret, riel,
mingo, dietmar.eggemann, tglx, hpa, peterz, valentin.schneider,
fweisbec, patrick.bellasi, vincent.guittot
Commit-ID: 0e1fef63d92d61ed561e504c3a078a827a0f9bfe
Gitweb: https://git.kernel.org/tip/0e1fef63d92d61ed561e504c3a078a827a0f9bfe
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:14 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:40 +0200
sched/core: Remove sd->*_idx
The sched domain per rq load index files also disappear from the
/proc/sys/kernel/sched_domain/cpuX/domainY directories.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190527062116.11512-6-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
include/linux/sched/topology.h | 5 -----
kernel/sched/debug.c | 25 ++++++++++---------------
kernel/sched/topology.c | 10 ----------
3 files changed, 10 insertions(+), 30 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cfc0a89a7159..53afbe07354a 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -84,11 +84,6 @@ struct sched_domain {
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int busy_idx;
- unsigned int idle_idx;
- unsigned int newidle_idx;
- unsigned int wake_idx;
- unsigned int forkexec_idx;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a0b0d6e21e5b..7ffde8ce82fd 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -251,25 +251,20 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
- struct ctl_table *table = sd_alloc_ctl_entry(14);
+ struct ctl_table *table = sd_alloc_ctl_entry(9);
if (table == NULL)
return NULL;
- set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[2], "busy_idx", &sd->busy_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[3], "idle_idx", &sd->idle_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[5], "wake_idx", &sd->wake_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[7], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
- set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
- /* &table[13] is terminator */
+ set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
+ /* &table[8] is terminator */
return table;
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index f53f89df837d..63184cf0d0d7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1344,11 +1344,6 @@ sd_init(struct sched_domain_topology_level *tl,
.imbalance_pct = 125,
.cache_nice_tries = 0,
- .busy_idx = 0,
- .idle_idx = 0,
- .newidle_idx = 0,
- .wake_idx = 0,
- .forkexec_idx = 0,
.flags = 1*SD_LOAD_BALANCE
| 1*SD_BALANCE_NEWIDLE
@@ -1400,13 +1395,10 @@ sd_init(struct sched_domain_topology_level *tl,
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
sd->imbalance_pct = 117;
sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
#ifdef CONFIG_NUMA
} else if (sd->flags & SD_NUMA) {
sd->cache_nice_tries = 2;
- sd->busy_idx = 3;
- sd->idle_idx = 2;
sd->flags &= ~SD_PREFER_SIBLING;
sd->flags |= SD_SERIALIZE;
@@ -1419,8 +1411,6 @@ sd_init(struct sched_domain_topology_level *tl,
#endif
} else {
sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
- sd->idle_idx = 1;
}
/*
* [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
` (4 preceding siblings ...)
2019-05-27 6:21 ` [PATCH 5/7] sched: Remove sd->*_idx Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 14:07 ` Vincent Guittot
` (2 more replies)
2019-05-27 6:21 ` [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load() Dietmar Eggemann
6 siblings, 3 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
Since sg_lb_stats::sum_weighted_load is now identical to
sg_lb_stats::group_load, remove it and replace its use case
(calculating load per task) with the latter.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/fair.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 88779c45e8e6..a33f196703a7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7580,7 +7580,6 @@ static unsigned long task_h_load(struct task_struct *p)
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long load_per_task;
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
@@ -7947,7 +7946,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
- sgs->sum_weighted_load += weighted_cpuload(rq);
/*
* No need to call idle_cpu() if nr_running is not 0
*/
@@ -7966,7 +7964,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
if (sgs->sum_nr_running)
- sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+ sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
sgs->group_weight = group->group_weight;
--
2.17.1
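A minimal user-space sketch (not kernel code; all numbers invented) of the two
per-group statistics that update_sg_lb_stats() now derives from group_load
alone, using the same formulas as the hunk above:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* three tasks of weight 1024 on a group with two full-capacity CPUs */
	unsigned long group_load = 3 * 1024;
	unsigned long group_capacity = 2 * 1024;
	unsigned long sum_nr_running = 3;

	/* same formulas as in update_sg_lb_stats() after the patch */
	unsigned long avg_load = group_load * SCHED_CAPACITY_SCALE / group_capacity;
	unsigned long load_per_task = sum_nr_running ? group_load / sum_nr_running : 0;

	printf("avg_load=%lu load_per_task=%lu\n", avg_load, load_per_task);
	/* prints: avg_load=1536 load_per_task=1024 */
	return 0;
}

With sum_weighted_load gone there is no second accumulator to keep in sync;
group_load is the single source for both values.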
* Re: [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load
2019-05-27 6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
@ 2019-05-27 14:07 ` Vincent Guittot
2019-05-27 16:13 ` Rik van Riel
2019-06-03 13:05 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
2 siblings, 0 replies; 28+ messages in thread
From: Vincent Guittot @ 2019-05-27 14:07 UTC (permalink / raw)
To: Dietmar Eggemann
Cc: Peter Zijlstra, Ingo Molnar, Thomas Gleixner,
Frederic Weisbecker, Rik van Riel, Morten Rasmussen,
Quentin Perret, Valentin Schneider, Patrick Bellasi,
linux-kernel
On Mon, 27 May 2019 at 08:21, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> Since sg_lb_stats::sum_weighted_load is now identical with
> sg_lb_stats::group_load remove it and replace its use case
> (calculating load per task) with the latter.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
FWIW
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
> kernel/sched/fair.c | 4 +---
> 1 file changed, 1 insertion(+), 3 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 88779c45e8e6..a33f196703a7 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -7580,7 +7580,6 @@ static unsigned long task_h_load(struct task_struct *p)
> struct sg_lb_stats {
> unsigned long avg_load; /*Avg load across the CPUs of the group */
> unsigned long group_load; /* Total load over the CPUs of the group */
> - unsigned long sum_weighted_load; /* Weighted load of group's tasks */
> unsigned long load_per_task;
> unsigned long group_capacity;
> unsigned long group_util; /* Total utilization of the group */
> @@ -7947,7 +7946,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> sgs->nr_numa_running += rq->nr_numa_running;
> sgs->nr_preferred_running += rq->nr_preferred_running;
> #endif
> - sgs->sum_weighted_load += weighted_cpuload(rq);
> /*
> * No need to call idle_cpu() if nr_running is not 0
> */
> @@ -7966,7 +7964,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
>
> if (sgs->sum_nr_running)
> - sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
> + sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
>
> sgs->group_weight = group->group_weight;
>
> --
> 2.17.1
>
* Re: [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load
2019-05-27 6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
2019-05-27 14:07 ` Vincent Guittot
@ 2019-05-27 16:13 ` Rik van Riel
2019-06-03 13:05 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
2 siblings, 0 replies; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:13 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> Since sg_lb_stats::sum_weighted_load is now identical with
> sg_lb_stats::group_load remove it and replace its use case
> (calculating load per task) with the latter.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Rik van Riel <riel@surriel.com>
--
All Rights Reversed.
* [tip:sched/core] sched/fair: Remove sgs->sum_weighted_load
2019-05-27 6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
2019-05-27 14:07 ` Vincent Guittot
2019-05-27 16:13 ` Rik van Riel
@ 2019-06-03 13:05 ` tip-bot for Dietmar Eggemann
2 siblings, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:05 UTC (permalink / raw)
To: linux-tip-commits
Cc: quentin.perret, linux-kernel, tglx, torvalds, patrick.bellasi,
dietmar.eggemann, hpa, mingo, morten.rasmussen, peterz, riel,
fweisbec, valentin.schneider, vincent.guittot
Commit-ID: af75d1a9a9f75bf030c2f35705f1ff6d226f96fe
Gitweb: https://git.kernel.org/tip/af75d1a9a9f75bf030c2f35705f1ff6d226f96fe
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Mon, 27 May 2019 07:21:15 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:41 +0200
sched/fair: Remove sgs->sum_weighted_load
Since sg_lb_stats::sum_weighted_load is now identical to
sg_lb_stats::group_load, remove it and replace its use case
(calculating load per task) with the latter.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20190527062116.11512-7-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/fair.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5b9691e5ea59..7f8d477f90fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7577,7 +7577,6 @@ static unsigned long task_h_load(struct task_struct *p)
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long load_per_task;
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
@@ -7944,7 +7943,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
- sgs->sum_weighted_load += weighted_cpuload(rq);
/*
* No need to call idle_cpu() if nr_running is not 0
*/
@@ -7963,7 +7961,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
if (sgs->sum_nr_running)
- sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+ sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
sgs->group_weight = group->group_weight;
* [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
2019-05-27 6:21 [PATCH 0/7] sched: Remove per rq load array Dietmar Eggemann
` (5 preceding siblings ...)
2019-05-27 6:21 ` [PATCH 6/7] sched/fair: Remove sgs->sum_weighted_load Dietmar Eggemann
@ 2019-05-27 6:21 ` Dietmar Eggemann
2019-05-27 13:31 ` Vincent Guittot
2019-05-27 16:24 ` Rik van Riel
6 siblings, 2 replies; 28+ messages in thread
From: Dietmar Eggemann @ 2019-05-27 6:21 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
This is done to align the per cpu (i.e. per rq) load with the util
counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
since there is no 'unweighted' load to distinguish it from.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/fair.c | 44 ++++++++++++++++++++------------------------
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a33f196703a7..f6d0aad13090 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_load(int cpu);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1485,9 +1485,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
- struct rq *rq = cpu_rq(cpu);
-
- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_load(cpu);
ns->compute_capacity += capacity_of(cpu);
}
@@ -5334,9 +5332,9 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_load(int cpu)
{
- return cfs_rq_runnable_load_avg(&rq->cfs);
+ return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
}
static unsigned long capacity_of(int cpu)
@@ -5348,7 +5346,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_load(cpu);
if (nr_running)
return load_avg / nr_running;
@@ -5446,7 +5444,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_load(this_cpu);
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5464,7 +5462,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ prev_eff_load = cpu_load(this_cpu);
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5552,7 +5550,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_load(i);
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5688,7 +5686,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_load(i);
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7259,8 +7257,8 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;
/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
- * busiest_rq, as part of a balancing operation within domain "sd".
+ * detach_tasks() -- tries to detach up to imbalance load from busiest_rq,
+ * as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
@@ -7326,8 +7324,7 @@ static int detach_tasks(struct lb_env *env)
#endif
/*
- * We only want to steal up to the prescribed amount of
- * weighted load.
+ * We only want to steal up to the prescribed amount of load.
*/
if (env->imbalance <= 0)
break;
@@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_load(i);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
@@ -8385,8 +8382,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
- * to restore balance.
+ * Also calculates the amount of load which should be moved to restore balance.
*
* @env: The load balancing environment.
*
@@ -8558,11 +8554,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;
- wl = weighted_cpuload(rq);
+ wl = cpu_load(i);
/*
- * When comparing with imbalance, use weighted_cpuload()
- * which is not scaled with the CPU capacity.
+ * When comparing with imbalance, use cpu_load() which is not
+ * scaled with the CPU capacity.
*/
if (rq->nr_running == 1 && wl > env->imbalance &&
@@ -8571,9 +8567,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
- * that the load can be moved away from the CPU that is
- * potentially running at a lower capacity.
+ * the cpu_load() scaled with the CPU capacity, so that the
+ * load can be moved away from the CPU that is potentially
+ * running at a lower capacity.
*
* Thus we're looking for max(wl_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
--
2.17.1
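The 'crosswise multiplication' mentioned in the find_busiest_queue() comment
above can be checked with a small user-space sketch (illustrative values only):
rather than comparing load_i/capacity_i against load_j/capacity_j, where
integer division would throw away the fractional part, the kernel compares
load_i * capacity_j against load_j * capacity_i:

#include <stdbool.h>
#include <stdio.h>

/* true if CPU i is more loaded relative to its capacity than CPU j */
static bool busier(unsigned long load_i, unsigned long cap_i,
		   unsigned long load_j, unsigned long cap_j)
{
	return load_i * cap_j > load_j * cap_i;
}

int main(void)
{
	/* CPU i: load 500 on capacity 512 (~0.98 of its capacity)
	 * CPU j: load 800 on capacity 1024 (~0.78 of its capacity) */
	printf("%d\n", busier(500, 512, 800, 1024));	/* 1: i is busier */

	/* plain integer division sees 500/512 == 800/1024 == 0 and
	 * cannot tell the two CPUs apart */
	printf("%d\n", (500 / 512) > (800 / 1024));	/* 0 */
	return 0;
}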
* Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
2019-05-27 6:21 ` [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load() Dietmar Eggemann
@ 2019-05-27 13:31 ` Vincent Guittot
2019-05-27 16:24 ` Rik van Riel
1 sibling, 0 replies; 28+ messages in thread
From: Vincent Guittot @ 2019-05-27 13:31 UTC (permalink / raw)
To: Dietmar Eggemann
Cc: Peter Zijlstra, Ingo Molnar, Thomas Gleixner,
Frederic Weisbecker, Rik van Riel, Morten Rasmussen,
Quentin Perret, Valentin Schneider, Patrick Bellasi,
linux-kernel
On Mon, 27 May 2019 at 08:21, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> This is done to align the per cpu (i.e. per rq) load with the util
> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> since there is no 'unweighted' load to distinguish it from.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
> ---
> kernel/sched/fair.c | 44 ++++++++++++++++++++------------------------
> 1 file changed, 20 insertions(+), 24 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a33f196703a7..f6d0aad13090 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
> group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
> }
>
> -static unsigned long weighted_cpuload(struct rq *rq);
> +static unsigned long cpu_load(int cpu);
>
> /* Cached statistics for all CPUs within a node */
> struct numa_stats {
> @@ -1485,9 +1485,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
>
> memset(ns, 0, sizeof(*ns));
> for_each_cpu(cpu, cpumask_of_node(nid)) {
> - struct rq *rq = cpu_rq(cpu);
> -
> - ns->load += weighted_cpuload(rq);
> + ns->load += cpu_load(cpu);
> ns->compute_capacity += capacity_of(cpu);
> }
>
> @@ -5334,9 +5332,9 @@ static struct {
>
> #endif /* CONFIG_NO_HZ_COMMON */
>
> -static unsigned long weighted_cpuload(struct rq *rq)
> +static unsigned long cpu_load(int cpu)
It would be better to use cpu_runnable_load() instead of cpu_load(),
because it returns runnable_load_avg and not load_avg.
> {
> - return cfs_rq_runnable_load_avg(&rq->cfs);
> + return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
> }
>
> static unsigned long capacity_of(int cpu)
> @@ -5348,7 +5346,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
> {
> struct rq *rq = cpu_rq(cpu);
> unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
> - unsigned long load_avg = weighted_cpuload(rq);
> + unsigned long load_avg = cpu_load(cpu);
>
> if (nr_running)
> return load_avg / nr_running;
> @@ -5446,7 +5444,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
> s64 this_eff_load, prev_eff_load;
> unsigned long task_load;
>
> - this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> + this_eff_load = cpu_load(this_cpu);
>
> if (sync) {
> unsigned long current_load = task_h_load(current);
> @@ -5464,7 +5462,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
> this_eff_load *= 100;
> this_eff_load *= capacity_of(prev_cpu);
>
> - prev_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> + prev_eff_load = cpu_load(this_cpu);
> prev_eff_load -= task_load;
> if (sched_feat(WA_BIAS))
> prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
> @@ -5552,7 +5550,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
> max_spare_cap = 0;
>
> for_each_cpu(i, sched_group_span(group)) {
> - load = weighted_cpuload(cpu_rq(i));
> + load = cpu_load(i);
> runnable_load += load;
>
> avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
> @@ -5688,7 +5686,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
> shallowest_idle_cpu = i;
> }
> } else if (shallowest_idle_cpu == -1) {
> - load = weighted_cpuload(cpu_rq(i));
> + load = cpu_load(i);
> if (load < min_load) {
> min_load = load;
> least_loaded_cpu = i;
> @@ -7259,8 +7257,8 @@ static struct task_struct *detach_one_task(struct lb_env *env)
> static const unsigned int sched_nr_migrate_break = 32;
>
> /*
> - * detach_tasks() -- tries to detach up to imbalance weighted load from
> - * busiest_rq, as part of a balancing operation within domain "sd".
> + * detach_tasks() -- tries to detach up to imbalance load from busiest_rq,
> + * as part of a balancing operation within domain "sd".
> *
> * Returns number of detached tasks if successful and 0 otherwise.
> */
> @@ -7326,8 +7324,7 @@ static int detach_tasks(struct lb_env *env)
> #endif
>
> /*
> - * We only want to steal up to the prescribed amount of
> - * weighted load.
> + * We only want to steal up to the prescribed amount of load.
> */
> if (env->imbalance <= 0)
> break;
> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
> env->flags |= LBF_NOHZ_AGAIN;
>
> - sgs->group_load += weighted_cpuload(rq);
> + sgs->group_load += cpu_load(i);
> sgs->group_util += cpu_util(i);
> sgs->sum_nr_running += rq->cfs.h_nr_running;
>
> @@ -8385,8 +8382,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
> * find_busiest_group - Returns the busiest group within the sched_domain
> * if there is an imbalance.
> *
> - * Also calculates the amount of weighted load which should be moved
> - * to restore balance.
> + * Also calculates the amount of load which should be moved to restore balance.
> *
> * @env: The load balancing environment.
> *
> @@ -8558,11 +8554,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> rq->nr_running == 1)
> continue;
>
> - wl = weighted_cpuload(rq);
> + wl = cpu_load(i);
>
> /*
> - * When comparing with imbalance, use weighted_cpuload()
> - * which is not scaled with the CPU capacity.
> + * When comparing with imbalance, use cpu_load() which is not
> + * scaled with the CPU capacity.
> */
>
> if (rq->nr_running == 1 && wl > env->imbalance &&
> @@ -8571,9 +8567,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>
> /*
> * For the load comparisons with the other CPU's, consider
> - * the weighted_cpuload() scaled with the CPU capacity, so
> - * that the load can be moved away from the CPU that is
> - * potentially running at a lower capacity.
> + * the cpu_load() scaled with the CPU capacity, so that the
> + * load can be moved away from the CPU that is potentially
> + * running at a lower capacity.
> *
> * Thus we're looking for max(wl_i / capacity_i), crosswise
> * multiplication to rid ourselves of the division works out
> --
> 2.17.1
>
* Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
2019-05-27 6:21 ` [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load() Dietmar Eggemann
2019-05-27 13:31 ` Vincent Guittot
@ 2019-05-27 16:24 ` Rik van Riel
2019-05-27 19:13 ` Peter Zijlstra
1 sibling, 1 reply; 28+ messages in thread
From: Rik van Riel @ 2019-05-27 16:24 UTC (permalink / raw)
To: Dietmar Eggemann, Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Vincent Guittot,
Morten Rasmussen, Quentin Perret, Valentin Schneider,
Patrick Bellasi, linux-kernel
On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> This is done to align the per cpu (i.e. per rq) load with the util
> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> since there is no 'unweighted' load to distinguish it from.
I can see why you want to make cpu_util() and cpu_load()
have the same parameter, but ...
> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
> lb_env *env,
> if ((env->flags & LBF_NOHZ_STATS) &&
> update_nohz_stats(rq, false))
> env->flags |= LBF_NOHZ_AGAIN;
>
> - sgs->group_load += weighted_cpuload(rq);
> + sgs->group_load += cpu_load(i);
> sgs->group_util += cpu_util(i);
> sgs->sum_nr_running += rq->cfs.h_nr_running;
... now we end up dereferencing cpu_rq(cpu) 3 times.
I guess per-cpu variables are so cheap that we should
never notice, but I thought I'd ask anyway while looking
over these patches :)
Thank you for removing a bunch of code that slowed down
my understanding of fair.c
--
All Rights Reversed.
* Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
2019-05-27 16:24 ` Rik van Riel
@ 2019-05-27 19:13 ` Peter Zijlstra
2019-06-18 12:23 ` Dietmar Eggemann
0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2019-05-27 19:13 UTC (permalink / raw)
To: Rik van Riel
Cc: Dietmar Eggemann, Ingo Molnar, Thomas Gleixner,
Frederic Weisbecker, Vincent Guittot, Morten Rasmussen,
Quentin Perret, Valentin Schneider, Patrick Bellasi,
linux-kernel
On Mon, May 27, 2019 at 12:24:07PM -0400, Rik van Riel wrote:
> On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> > This is done to align the per cpu (i.e. per rq) load with the util
> > counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> > since there is no 'unweighted' load to distinguish it from.
>
> I can see why you want to make cpu_util() and cpu_load()
> have the same parameter, but ...
>
> > @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
> > lb_env *env,
> > if ((env->flags & LBF_NOHZ_STATS) &&
> > update_nohz_stats(rq, false))
> > env->flags |= LBF_NOHZ_AGAIN;
> >
> > - sgs->group_load += weighted_cpuload(rq);
> > + sgs->group_load += cpu_load(i);
> > sgs->group_util += cpu_util(i);
> > sgs->sum_nr_running += rq->cfs.h_nr_running;
>
> ... now we end up dereferencing cpu_rq(cpu) 3 times.
>
> I guess per-cpu variables are so cheap that we should
> never notice, but I thought I'd ask anyway while looking
> over these patches :)
I was going to say CSE (common subexpression elimination) should fix
that, but then I noticed per_cpu contains that hideous RELOC_HIDE()
thing and I figure that might confuse GCC enough to break that :/
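A user-space sketch of the concern discussed in the last two mails (cpu_rq()
is replaced by a plain array lookup here; nothing below is kernel code): with
an 'int cpu' interface every accessor re-resolves the runqueue, while keeping
a 'struct rq *' parameter lets the caller hoist the lookup by hand instead of
relying on the compiler to common up the per-CPU address computation:

#include <stdio.h>

struct rq {
	unsigned long runnable_load_avg;
	unsigned int h_nr_running;
};

static struct rq runqueues[2] = { { 1024, 2 }, { 512, 1 } };

/* stand-in for the kernel's per-CPU cpu_rq(cpu) lookup */
static struct rq *cpu_rq(int cpu)
{
	return &runqueues[cpu];
}

/* 'int cpu' flavour: each call does its own lookup */
static unsigned long cpu_load(int cpu)
{
	return cpu_rq(cpu)->runnable_load_avg;
}

/* 'struct rq *' flavour: the caller resolves the runqueue once */
static unsigned long cpu_runnable_load(struct rq *rq)
{
	return rq->runnable_load_avg;
}

int main(void)
{
	unsigned long load_by_cpu = 0, load_by_rq = 0;
	unsigned int sum_nr_running = 0;

	for (int i = 0; i < 2; i++) {
		struct rq *rq = cpu_rq(i);		/* one lookup, reused below */

		load_by_cpu += cpu_load(i);		/* re-resolves cpu_rq(i) internally */
		load_by_rq += cpu_runnable_load(rq);	/* reuses the pointer */
		sum_nr_running += rq->h_nr_running;
	}

	printf("by_cpu=%lu by_rq=%lu nr_running=%u\n",
	       load_by_cpu, load_by_rq, sum_nr_running);
	return 0;
}

This is essentially the direction the follow-up below takes: cpu_runnable_load()
keeps the 'struct rq *' parameter, so callers pass the rq they already looked up.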
* Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()
2019-05-27 19:13 ` Peter Zijlstra
@ 2019-06-18 12:23 ` Dietmar Eggemann
2019-06-25 8:29 ` [tip:sched/core] sched/fair: Rename weighted_cpuload() to cpu_runnable_load() tip-bot for Dietmar Eggemann
0 siblings, 1 reply; 28+ messages in thread
From: Dietmar Eggemann @ 2019-06-18 12:23 UTC (permalink / raw)
To: Peter Zijlstra, Rik van Riel
Cc: Ingo Molnar, Thomas Gleixner, Frederic Weisbecker,
Vincent Guittot, Morten Rasmussen, Quentin Perret,
Valentin Schneider, Patrick Bellasi, linux-kernel
On 5/27/19 9:13 PM, Peter Zijlstra wrote:
> On Mon, May 27, 2019 at 12:24:07PM -0400, Rik van Riel wrote:
>> On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
>>> This is done to align the per cpu (i.e. per rq) load with the util
>>> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
>>> since there is no 'unweighted' load to distinguish it from.
>>
>> I can see why you want to make cpu_util() and cpu_load()
>> have the same parameter, but ...
>>
>>> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
>>> lb_env *env,
>>> if ((env->flags & LBF_NOHZ_STATS) &&
>>> update_nohz_stats(rq, false))
>>> env->flags |= LBF_NOHZ_AGAIN;
>>>
>>> - sgs->group_load += weighted_cpuload(rq);
>>> + sgs->group_load += cpu_load(i);
>>> sgs->group_util += cpu_util(i);
>>> sgs->sum_nr_running += rq->cfs.h_nr_running;
>>
>> ... now we end up dereferencing cpu_rq(cpu) 3 times.
>>
>> I guess per-cpu variables are so cheap that we should
>> never notice, but I thought I'd ask anyway while looking
>> over these patches :)
>
> I was going to say CSE should fix that, but then I noticed per_cpu
> contains that hideous RELOC_HIDE() thing and I figure that might
> confuse GCC enough to break that :/
--->8---
From 25fcbbd9f654f243a70e38b0d59d38eb3c3f9313 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
Date: Mon, 13 May 2019 11:50:32 +0100
Subject: [PATCH] sched/fair: Rename weighted_cpuload() to cpu_runnable_load()
The term 'weighted' is not needed since there is no 'unweighted' load.
Instead use the term 'runnable' to distinguish 'runnable' load
(avg.runnable_load_avg), used in load balancing, from load (avg.load_avg),
which is the sum of 'runnable' and 'blocked' load.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
Related to the question whether replacing the 'struct rq *rq' parameter
with 'int cpu' (cpu_rq(cpu)) for cpu_runnable_load() has an influence
on the code:
RELOC_HIDE() (in per_cpu_ptr() -> SHIFT_PERCPU_PTR()) prevents the
compiler from generating similar code (e.g. in update_sg_lb_stats()).
When using 'int cpu', the addressing mode changes from based addressing
to based-indexed-scaled addressing. Moreover, the text size of fair.o
grows by 32 bytes.
kernel/sched/fair.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c11dcdedcbc..0436f8eba556 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_runnable_load(struct rq *rq);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1487,7 +1487,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_runnable_load(rq);
ns->compute_capacity += capacity_of(cpu);
}
@@ -5338,7 +5338,7 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_runnable_load(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
@@ -5352,7 +5352,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_runnable_load(rq);
if (nr_running)
return load_avg / nr_running;
@@ -5450,7 +5450,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5468,7 +5468,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5556,7 +5556,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5692,7 +5692,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7263,7 +7263,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;
/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * detach_tasks() -- tries to detach up to imbalance runnable load from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
@@ -7331,7 +7331,7 @@ static int detach_tasks(struct lb_env *env)
/*
* We only want to steal up to the prescribed amount of
- * weighted load.
+ * runnable load.
*/
if (env->imbalance <= 0)
break;
@@ -7941,7 +7941,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_runnable_load(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
@@ -8395,7 +8395,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
+ * Also calculates the amount of runnable load which should be moved
* to restore balance.
*
* @env: The load balancing environment.
@@ -8514,7 +8514,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, wl;
+ unsigned long capacity, load;
enum fbq_type rt;
rq = cpu_rq(i);
@@ -8568,30 +8568,30 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;
- wl = weighted_cpuload(rq);
+ load = cpu_runnable_load(rq);
/*
- * When comparing with imbalance, use weighted_cpuload()
+ * When comparing with imbalance, use cpu_runnable_load()
* which is not scaled with the CPU capacity.
*/
- if (rq->nr_running == 1 && wl > env->imbalance &&
+ if (rq->nr_running == 1 && load > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
continue;
/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
+ * the cpu_runnable_load() scaled with the CPU capacity, so
* that the load can be moved away from the CPU that is
* potentially running at a lower capacity.
*
- * Thus we're looking for max(wl_i / capacity_i), crosswise
+ * Thus we're looking for max(load_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
- * to: wl_i * capacity_j > wl_j * capacity_i; where j is
+ * to: load_i * capacity_j > load_j * capacity_i; where j is
* our previous maximum.
*/
- if (wl * busiest_capacity > busiest_load * capacity) {
- busiest_load = wl;
+ if (load * busiest_capacity > busiest_load * capacity) {
+ busiest_load = load;
busiest_capacity = capacity;
busiest = rq;
}
--
2.17.1
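A small illustration of the runnable/blocked distinction the new name is based
on (numbers invented for the example): runnable_load_avg only tracks the load
of tasks currently enqueued on the rq, while load_avg additionally carries the
not-yet-decayed contribution of tasks that recently blocked:

#include <stdio.h>

int main(void)
{
	/* illustrative PELT-style contributions on one CPU */
	unsigned long runnable_contrib = 2 * 1024;	/* two enqueued tasks      */
	unsigned long blocked_contrib = 300;		/* a recently blocked task */

	unsigned long runnable_load_avg = runnable_contrib;
	unsigned long load_avg = runnable_contrib + blocked_contrib;

	printf("runnable_load_avg=%lu load_avg=%lu\n",
	       runnable_load_avg, load_avg);
	return 0;
}

Load balancing uses the former, which is why cpu_runnable_load() is a more
precise name than plain cpu_load().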
* [tip:sched/core] sched/fair: Rename weighted_cpuload() to cpu_runnable_load()
2019-06-18 12:23 ` Dietmar Eggemann
@ 2019-06-25 8:29 ` tip-bot for Dietmar Eggemann
0 siblings, 0 replies; 28+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-25 8:29 UTC (permalink / raw)
To: linux-tip-commits
Cc: tglx, morten.rasmussen, hpa, fweisbec, linux-kernel,
dietmar.eggemann, valentin.schneider, mingo, quentin.perret,
patrick.bellasi, torvalds, peterz, riel, vincent.guittot
Commit-ID: a3df067974c52df936f548ed218120f623c4c560
Gitweb: https://git.kernel.org/tip/a3df067974c52df936f548ed218120f623c4c560
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Tue, 18 Jun 2019 14:23:10 +0200
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 24 Jun 2019 19:23:43 +0200
sched/fair: Rename weighted_cpuload() to cpu_runnable_load()
The term 'weighted' is not needed since there is no 'unweighted' load.
Instead use the term 'runnable' to distinguish 'runnable' load
(avg.runnable_load_avg), used in load balancing, from load (avg.load_avg),
which is the sum of 'runnable' and 'blocked' load.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/57f27a7f-2775-d832-e965-0f4d51bb1954@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/fair.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11ec52709323..3bdcd3c718bc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1485,7 +1485,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_runnable_load(struct rq *rq);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1506,7 +1506,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_runnable_load(rq);
ns->compute_capacity += capacity_of(cpu);
}
@@ -5366,7 +5366,7 @@ static struct {
#endif /* CONFIG_NO_HZ_COMMON */
-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_runnable_load(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
@@ -5380,7 +5380,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_runnable_load(rq);
if (nr_running)
return load_avg / nr_running;
@@ -5478,7 +5478,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5496,7 +5496,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5584,7 +5584,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5720,7 +5720,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7291,7 +7291,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;
/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * detach_tasks() -- tries to detach up to imbalance runnable load from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
@@ -7359,7 +7359,7 @@ static int detach_tasks(struct lb_env *env)
/*
* We only want to steal up to the prescribed amount of
- * weighted load.
+ * runnable load.
*/
if (env->imbalance <= 0)
break;
@@ -7969,7 +7969,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_runnable_load(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;
@@ -8427,7 +8427,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
+ * Also calculates the amount of runnable load which should be moved
* to restore balance.
*
* @env: The load balancing environment.
@@ -8546,7 +8546,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, wl;
+ unsigned long capacity, load;
enum fbq_type rt;
rq = cpu_rq(i);
@@ -8600,30 +8600,30 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;
- wl = weighted_cpuload(rq);
+ load = cpu_runnable_load(rq);
/*
- * When comparing with imbalance, use weighted_cpuload()
+ * When comparing with imbalance, use cpu_runnable_load()
* which is not scaled with the CPU capacity.
*/
- if (rq->nr_running == 1 && wl > env->imbalance &&
+ if (rq->nr_running == 1 && load > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
continue;
/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
+ * the cpu_runnable_load() scaled with the CPU capacity, so
* that the load can be moved away from the CPU that is
* potentially running at a lower capacity.
*
- * Thus we're looking for max(wl_i / capacity_i), crosswise
+ * Thus we're looking for max(load_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
- * to: wl_i * capacity_j > wl_j * capacity_i; where j is
+ * to: load_i * capacity_j > load_j * capacity_i; where j is
* our previous maximum.
*/
- if (wl * busiest_capacity > busiest_load * capacity) {
- busiest_load = wl;
+ if (load * busiest_capacity > busiest_load * capacity) {
+ busiest_load = load;
busiest_capacity = capacity;
busiest = rq;
}
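As a stand-alone illustration of the comment above, the division-free
comparison picks the rq with the highest load/capacity ratio. The sketch
below is plain userspace C with made-up load and capacity numbers, not
kernel code:

/*
 * Pick the "busiest" CPU as the one maximizing load/capacity, using
 * load_i * capacity_j > load_j * capacity_i to avoid the division.
 */
#include <stdio.h>

int main(void)
{
	unsigned long load[]     = {  900,  600, 1000 };
	unsigned long capacity[] = { 1024,  512, 1024 };
	unsigned long busiest_load = 0, busiest_capacity = 1;
	int i, busiest = -1;

	for (i = 0; i < 3; i++) {
		/* load[i] / capacity[i] > busiest_load / busiest_capacity,
		 * rewritten as a crosswise multiplication: */
		if (load[i] * busiest_capacity > busiest_load * capacity[i]) {
			busiest_load = load[i];
			busiest_capacity = capacity[i];
			busiest = i;
		}
	}

	/* CPU 1 wins: 600/512 ~= 1.17 beats 900/1024 ~= 0.88 and 1000/1024 ~= 0.98 */
	printf("busiest CPU: %d\n", busiest);
	return 0;
}

With these numbers CPU 1 is selected even though CPU 2 has the larger
absolute load, because its load relative to its (smaller) capacity is higher.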