All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/6] move update blocked load outside newidle_balance
@ 2021-02-05 11:48 Vincent Guittot
  2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
                   ` (5 more replies)
  0 siblings, 6 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Joel reported long preempt and irq off sequences in newidle_balance because
of a large number of cgroups in use and having to be updated. This
patchset moves the update outside newidle_balance in order to enable
early abort during the updates in case of pending irq as an example.

Instead of kicking a normal ILB that will wake up a CPU which is already
idle, patch 5 triggers the update of statistics in the idle thread of
the CPU before selecting and entering an idle state.

Vincent Guittot (6):
  sched/fair: remove update of blocked load from newidle_balance
  sched/fair: remove unused parameter of update_nohz_stats
  sched/fair: merge for each idle cpu loop of ILB
  sched/fair: reorder newidle_balance pulled_task test
  sched/fair: trigger the update of blocked load on newly idle cpu
  sched/fair: reduce the window for duplicated update

 include/linux/sched/nohz.h |   2 +
 kernel/sched/fair.c        | 100 +++++++++++--------------------------
 kernel/sched/idle.c        |   6 +++
 3 files changed, 38 insertions(+), 70 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-09 13:09   ` Valentin Schneider
  2021-02-09 13:44   ` Dietmar Eggemann
  2021-02-05 11:48 ` [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats Vincent Guittot
                   ` (4 subsequent siblings)
  5 siblings, 2 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

newidle_balance runs with both preempt and irq disabled, which prevents
local irqs from running during this period. The duration of the update of
the blocked load of CPUs varies according to the number of cgroups and
extends this critical period to an uncontrolled level.

Remove the update from newidle_balance and trigger a normal ILB that
will take care of the update instead.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 33 +++++----------------------------
 1 file changed, 5 insertions(+), 28 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 59b645e3c4fd..bfe1e235fe01 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7392,8 +7392,6 @@ enum migration_type {
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED  0x04
 #define LBF_SOME_PINNED	0x08
-#define LBF_NOHZ_STATS	0x10
-#define LBF_NOHZ_AGAIN	0x20
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -8397,9 +8395,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
-			env->flags |= LBF_NOHZ_AGAIN;
-
 		sgs->group_load += cpu_load(rq);
 		sgs->group_util += cpu_util(i);
 		sgs->group_runnable += cpu_runnable(rq);
@@ -8940,11 +8935,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	struct sg_lb_stats tmp_sgs;
 	int sg_status = 0;
 
-#ifdef CONFIG_NO_HZ_COMMON
-	if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
-		env->flags |= LBF_NOHZ_STATS;
-#endif
-
 	do {
 		struct sg_lb_stats *sgs = &tmp_sgs;
 		int local_group;
@@ -8981,14 +8971,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	/* Tag domain that child domain prefers tasks go to siblings first */
 	sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
 
-#ifdef CONFIG_NO_HZ_COMMON
-	if ((env->flags & LBF_NOHZ_AGAIN) &&
-	    cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
-
-		WRITE_ONCE(nohz.next_blocked,
-			   jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
-	}
-#endif
 
 	if (env->sd->flags & SD_NUMA)
 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
@@ -10517,16 +10499,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
 	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
 		return;
 
-	raw_spin_unlock(&this_rq->lock);
 	/*
-	 * This CPU is going to be idle and blocked load of idle CPUs
-	 * need to be updated. Run the ilb locally as it is a good
-	 * candidate for ilb instead of waking up another idle CPU.
-	 * Kick an normal ilb if we failed to do the update.
+	 * Blocked load of idle CPUs need to be updated.
+	 * Kick an ILB to update statistics.
 	 */
-	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
-		kick_ilb(NOHZ_STATS_KICK);
-	raw_spin_lock(&this_rq->lock);
+	kick_ilb(NOHZ_STATS_KICK);
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
@@ -10587,8 +10564,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 			update_next_balance(sd, &next_balance);
 		rcu_read_unlock();
 
-		nohz_newidle_balance(this_rq);
-
 		goto out;
 	}
 
@@ -10654,6 +10629,8 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 
 	if (pulled_task)
 		this_rq->idle_stamp = 0;
+	else
+		nohz_newidle_balance(this_rq);
 
 	rq_repin_lock(this_rq, rf);
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
  2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-09 13:45   ` Dietmar Eggemann
  2021-02-05 11:48 ` [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB Vincent Guittot
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

idle load balance is the only user of update_nohz_stats and doesn't use
the force parameter. Remove it.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bfe1e235fe01..60b8c1c68ab9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8352,7 +8352,7 @@ group_type group_classify(unsigned int imbalance_pct,
 	return group_has_spare;
 }
 
-static bool update_nohz_stats(struct rq *rq, bool force)
+static bool update_nohz_stats(struct rq *rq)
 {
 #ifdef CONFIG_NO_HZ_COMMON
 	unsigned int cpu = rq->cpu;
@@ -8363,7 +8363,7 @@ static bool update_nohz_stats(struct rq *rq, bool force)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
 		return true;
 
 	update_blocked_averages(cpu);
@@ -10404,7 +10404,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 
 		rq = cpu_rq(balance_cpu);
 
-		has_blocked_load |= update_nohz_stats(rq, true);
+		has_blocked_load |= update_nohz_stats(rq);
 
 		/*
 		 * If time for next balance is due,
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
  2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
  2021-02-05 11:48 ` [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-09 13:09   ` Valentin Schneider
  2021-02-05 11:48 ` [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test Vincent Guittot
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Remove the specific case for handling this_cpu outside for_each_cpu() loop
when running ILB. Instead we use for_each_cpu_wrap() and start with the
next cpu after this_cpu so we will continue to finish with this_cpu.

update_nohz_stats() is now used for this_cpu too and will prevent
unnecessary updates. We don't need a special case for handling the update of
nohz.next_balance for this_cpu anymore because it is now handled by the
loop like others.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 32 +++++++-------------------------
 1 file changed, 7 insertions(+), 25 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 60b8c1c68ab9..c587af230010 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10043,22 +10043,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 	 * When the cpu is attached to null domain for ex, it will not be
 	 * updated.
 	 */
-	if (likely(update_next_balance)) {
+	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
 
-#ifdef CONFIG_NO_HZ_COMMON
-		/*
-		 * If this CPU has been elected to perform the nohz idle
-		 * balance. Other idle CPUs have already rebalanced with
-		 * nohz_idle_balance() and nohz.next_balance has been
-		 * updated accordingly. This CPU is now running the idle load
-		 * balance for itself and we need to update the
-		 * nohz.next_balance accordingly.
-		 */
-		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
-			nohz.next_balance = rq->next_balance;
-#endif
-	}
 }
 
 static inline int on_null_domain(struct rq *rq)
@@ -10388,8 +10375,12 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	 */
 	smp_mb();
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
+	/*
+	 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
+	 * chance for other idle cpu to pull load.
+	 */
+	for_each_cpu_wrap(balance_cpu,  nohz.idle_cpus_mask, this_cpu+1) {
+		if (!idle_cpu(balance_cpu))
 			continue;
 
 		/*
@@ -10435,15 +10426,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 
-	/* Newly idle CPU doesn't need an update */
-	if (idle != CPU_NEWLY_IDLE) {
-		update_blocked_averages(this_cpu);
-		has_blocked_load |= this_rq->has_blocked_load;
-	}
-
-	if (flags & NOHZ_BALANCE_KICK)
-		rebalance_domains(this_rq, CPU_IDLE);
-
 	WRITE_ONCE(nohz.next_blocked,
 		now + msecs_to_jiffies(LOAD_AVG_PERIOD));
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
                   ` (2 preceding siblings ...)
  2021-02-05 11:48 ` [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-09 13:46   ` Dietmar Eggemann
  2021-02-05 11:48 ` [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu Vincent Guittot
  2021-02-05 11:48 ` [PATCH 6/6] sched/fair: reduce the window for duplicated update Vincent Guittot
  5 siblings, 1 reply; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Reorder the tests and skip the useless ones when no load balance has
been performed.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c587af230010..935594cd5430 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10592,7 +10592,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
 
-out:
 	/*
 	 * While browsing the domains, we released the rq lock, a task could
 	 * have been enqueued in the meantime. Since we're not going idle,
@@ -10601,14 +10600,15 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
 
-	/* Move the next balance forward */
-	if (time_after(this_rq->next_balance, next_balance))
-		this_rq->next_balance = next_balance;
-
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
+out:
+	/* Move the next balance forward */
+	if (time_after(this_rq->next_balance, next_balance))
+		this_rq->next_balance = next_balance;
+
 	if (pulled_task)
 		this_rq->idle_stamp = 0;
 	else
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
                   ` (3 preceding siblings ...)
  2021-02-05 11:48 ` [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-09 13:09   ` Valentin Schneider
  2021-02-09 13:47   ` Dietmar Eggemann
  2021-02-05 11:48 ` [PATCH 6/6] sched/fair: reduce the window for duplicated update Vincent Guittot
  5 siblings, 2 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Instead of waking up a random and already idle CPU, we can take advantage
of this_cpu being about to enter idle to run the ILB and update the
blocked load.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 include/linux/sched/nohz.h |  2 ++
 kernel/sched/fair.c        | 11 ++++++++---
 kernel/sched/idle.c        |  6 ++++++
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 6d67e9a5af6b..74cdc4e87310 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -9,8 +9,10 @@
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern int get_nohz_timer_target(void);
+extern void nohz_run_idle_balance(int cpu);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
+static inline void nohz_run_idle_balance(int cpu) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 935594cd5430..3d2ab28d5736 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10461,6 +10461,11 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	return true;
 }
 
+void nohz_run_idle_balance(int cpu)
+{
+	nohz_idle_balance(cpu_rq(cpu), CPU_IDLE);
+}
+
 static void nohz_newidle_balance(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu;
@@ -10482,10 +10487,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	/*
-	 * Blocked load of idle CPUs need to be updated.
-	 * Kick an ILB to update statistics.
+	 * Set the need to trigger ILB in order to update blocked load
+	 * before entering idle state.
 	 */
-	kick_ilb(NOHZ_STATS_KICK);
+	this_rq->nohz_idle_balance = NOHZ_STATS_KICK;
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 305727ea0677..52a4e9ce2f9b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -261,6 +261,12 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+
+	/*
+	 * Check if we need to update some blocked load
+	 */
+	nohz_run_idle_balance(cpu);
+
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 6/6] sched/fair: reduce the window for duplicated update
  2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
                   ` (4 preceding siblings ...)
  2021-02-05 11:48 ` [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu Vincent Guittot
@ 2021-02-05 11:48 ` Vincent Guittot
  2021-02-05 15:13   ` kernel test robot
  2021-02-05 16:13   ` [PATCH 6/6 v2] " Vincent Guittot
  5 siblings, 2 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 11:48 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Start by updating last_blocked_load_update_tick to reduce the possibility
of another cpu starting the update one more time.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d2ab28d5736..968808c2c022 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7852,17 +7852,9 @@ static inline bool others_have_blocked(struct rq *rq)
 	return false;
 }
 
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
-{
-	rq->last_blocked_load_update_tick = jiffies;
-
-	if (!has_blocked)
-		rq->has_blocked_load = 0;
-}
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
 static bool __update_blocked_others(struct rq *rq, bool *done)
@@ -8022,12 +8014,16 @@ static void update_blocked_averages(int cpu)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
+	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+
 	update_rq_clock(rq);
 
 	decayed |= __update_blocked_others(rq, &done);
 	decayed |= __update_blocked_fair(rq, &done);
 
-	update_blocked_load_status(rq, !done);
+	if (done)
+		rq->has_blocked_load = 0;
+
 	if (decayed)
 		cpufreq_update_util(rq, 0);
 	rq_unlock_irqrestore(rq, &rf);
@@ -8363,7 +8359,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
 	update_blocked_averages(cpu);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 6/6] sched/fair: reduce the window for duplicated update
  2021-02-05 11:48 ` [PATCH 6/6] sched/fair: reduce the window for duplicated update Vincent Guittot
@ 2021-02-05 15:13   ` kernel test robot
  2021-02-05 16:13   ` [PATCH 6/6 v2] " Vincent Guittot
  1 sibling, 0 replies; 21+ messages in thread
From: kernel test robot @ 2021-02-05 15:13 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 10938 bytes --]

Hi Vincent,

I love your patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on tip/master v5.11-rc6 next-20210125]
[cannot apply to tip/timers/nohz]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Vincent-Guittot/move-update-blocked-load-outside-newidle_balance/20210205-200205
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 075a28439d0c8eb6d3c799e1eed24bb9bc7750cd
config: x86_64-randconfig-a014-20210205 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project c9439ca36342fb6013187d0a69aef92736951476)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/806753cfbff0017da882b79fe05d4f40a19d72f9
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Vincent-Guittot/move-update-blocked-load-outside-newidle_balance/20210205-200205
        git checkout 806753cfbff0017da882b79fe05d4f40a19d72f9
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:60:33: note: expanded from macro 'WRITE_ONCE'
           compiletime_assert_rwonce_type(x);                              \
                                          ^
   include/asm-generic/rwonce.h:36:35: note: expanded from macro 'compiletime_assert_rwonce_type'
           compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
                                            ^
   include/linux/compiler_types.h:288:10: note: expanded from macro '__native_word'
           (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
                   ^
   include/linux/compiler_types.h:326:22: note: expanded from macro 'compiletime_assert'
           _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
                               ^~~~~~~~~
   include/linux/compiler_types.h:314:23: note: expanded from macro '_compiletime_assert'
           __compiletime_assert(condition, msg, prefix, suffix)
                                ^~~~~~~~~
   include/linux/compiler_types.h:306:9: note: expanded from macro '__compiletime_assert'
                   if (!(condition))                                       \
                         ^~~~~~~~~
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:60:33: note: expanded from macro 'WRITE_ONCE'
           compiletime_assert_rwonce_type(x);                              \
                                          ^
   include/asm-generic/rwonce.h:36:35: note: expanded from macro 'compiletime_assert_rwonce_type'
           compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
                                            ^
   include/linux/compiler_types.h:288:39: note: expanded from macro '__native_word'
           (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
                                                ^
   include/linux/compiler_types.h:326:22: note: expanded from macro 'compiletime_assert'
           _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
                               ^~~~~~~~~
   include/linux/compiler_types.h:314:23: note: expanded from macro '_compiletime_assert'
           __compiletime_assert(condition, msg, prefix, suffix)
                                ^~~~~~~~~
   include/linux/compiler_types.h:306:9: note: expanded from macro '__compiletime_assert'
                   if (!(condition))                                       \
                         ^~~~~~~~~
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:60:33: note: expanded from macro 'WRITE_ONCE'
           compiletime_assert_rwonce_type(x);                              \
                                          ^
   include/asm-generic/rwonce.h:36:35: note: expanded from macro 'compiletime_assert_rwonce_type'
           compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
                                            ^
   include/linux/compiler_types.h:289:10: note: expanded from macro '__native_word'
            sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
                   ^
   include/linux/compiler_types.h:326:22: note: expanded from macro 'compiletime_assert'
           _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
                               ^~~~~~~~~
   include/linux/compiler_types.h:314:23: note: expanded from macro '_compiletime_assert'
           __compiletime_assert(condition, msg, prefix, suffix)
                                ^~~~~~~~~
   include/linux/compiler_types.h:306:9: note: expanded from macro '__compiletime_assert'
                   if (!(condition))                                       \
                         ^~~~~~~~~
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:60:33: note: expanded from macro 'WRITE_ONCE'
           compiletime_assert_rwonce_type(x);                              \
                                          ^
   include/asm-generic/rwonce.h:36:35: note: expanded from macro 'compiletime_assert_rwonce_type'
           compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
                                            ^
   include/linux/compiler_types.h:289:38: note: expanded from macro '__native_word'
            sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
                                               ^
   include/linux/compiler_types.h:326:22: note: expanded from macro 'compiletime_assert'
           _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
                               ^~~~~~~~~
   include/linux/compiler_types.h:314:23: note: expanded from macro '_compiletime_assert'
           __compiletime_assert(condition, msg, prefix, suffix)
                                ^~~~~~~~~
   include/linux/compiler_types.h:306:9: note: expanded from macro '__compiletime_assert'
                   if (!(condition))                                       \
                         ^~~~~~~~~
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:60:33: note: expanded from macro 'WRITE_ONCE'
           compiletime_assert_rwonce_type(x);                              \
                                          ^
   include/asm-generic/rwonce.h:36:48: note: expanded from macro 'compiletime_assert_rwonce_type'
           compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
                                                         ^
   include/linux/compiler_types.h:326:22: note: expanded from macro 'compiletime_assert'
           _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
                               ^~~~~~~~~
   include/linux/compiler_types.h:314:23: note: expanded from macro '_compiletime_assert'
           __compiletime_assert(condition, msg, prefix, suffix)
                                ^~~~~~~~~
   include/linux/compiler_types.h:306:9: note: expanded from macro '__compiletime_assert'
                   if (!(condition))                                       \
                         ^~~~~~~~~
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:61:15: note: expanded from macro 'WRITE_ONCE'
           __WRITE_ONCE(x, val);                                           \
                        ^
   include/asm-generic/rwonce.h:55:20: note: expanded from macro '__WRITE_ONCE'
           *(volatile typeof(x) *)&(x) = (val);                            \
                             ^
>> kernel/sched/fair.c:8017:17: error: no member named 'last_blocked_load_update_tick' in 'struct rq'
           WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
                      ~~  ^
   include/asm-generic/rwonce.h:61:15: note: expanded from macro 'WRITE_ONCE'
           __WRITE_ONCE(x, val);                                           \
                        ^
   include/asm-generic/rwonce.h:55:27: note: expanded from macro '__WRITE_ONCE'
           *(volatile typeof(x) *)&(x) = (val);                            \
                                    ^
>> kernel/sched/fair.c:8025:7: error: no member named 'has_blocked_load' in 'struct rq'
                   rq->has_blocked_load = 0;
                   ~~  ^
   8 errors generated.


vim +8017 kernel/sched/fair.c

  8009	
  8010	static void update_blocked_averages(int cpu)
  8011	{
  8012		bool decayed = false, done = true;
  8013		struct rq *rq = cpu_rq(cpu);
  8014		struct rq_flags rf;
  8015	
  8016		rq_lock_irqsave(rq, &rf);
> 8017		WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
  8018	
  8019		update_rq_clock(rq);
  8020	
  8021		decayed |= __update_blocked_others(rq, &done);
  8022		decayed |= __update_blocked_fair(rq, &done);
  8023	
  8024		if (done)
> 8025			rq->has_blocked_load = 0;
  8026	
  8027		if (decayed)
  8028			cpufreq_update_util(rq, 0);
  8029		rq_unlock_irqrestore(rq, &rf);
  8030	}
  8031	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 30569 bytes --]

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH 6/6 v2] sched/fair: reduce the window for duplicated update
  2021-02-05 11:48 ` [PATCH 6/6] sched/fair: reduce the window for duplicated update Vincent Guittot
  2021-02-05 15:13   ` kernel test robot
@ 2021-02-05 16:13   ` Vincent Guittot
  1 sibling, 0 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-05 16:13 UTC (permalink / raw)
  To: mingo, peterz, juri.lelli, dietmar.eggemann, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef, Vincent Guittot

Start by updating last_blocked_load_update_tick to reduce the possibility
of another cpu starting the update one more time.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---

v2:
- fixed compilation error for !CONFIG_NO_HZ_COMMON reported by 
  kernel test robot <lkp@intel.com>
  
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d2ab28d5736..f939a1faa014 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7852,16 +7852,20 @@ static inline bool others_have_blocked(struct rq *rq)
 	return false;
 }
 
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+static inline void update_blocked_load_tick(struct rq *rq)
 {
-	rq->last_blocked_load_update_tick = jiffies;
+	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+}
 
+static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+{
 	if (!has_blocked)
 		rq->has_blocked_load = 0;
 }
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
+static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
@@ -8022,6 +8026,7 @@ static void update_blocked_averages(int cpu)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
+	update_blocked_load_tick(rq);
 	update_rq_clock(rq);
 
 	decayed |= __update_blocked_others(rq, &done);
@@ -8363,7 +8368,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
 	update_blocked_averages(cpu);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance
  2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
@ 2021-02-09 13:09   ` Valentin Schneider
  2021-02-09 13:20     ` Vincent Guittot
  2021-02-09 13:44   ` Dietmar Eggemann
  1 sibling, 1 reply; 21+ messages in thread
From: Valentin Schneider @ 2021-02-09 13:09 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
	rostedt, bsegall, mgorman, fweisbec, tglx, bristot, linux-kernel,
	joel
  Cc: qais.yousef, Vincent Guittot

On 05/02/21 12:48, Vincent Guittot wrote:
> @@ -10517,16 +10499,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
>           time_before(jiffies, READ_ONCE(nohz.next_blocked)))
>               return;
>

I was wondering whether all the conditions above were still relevant. I
think they are, but this one:

        /* Will wake up very soon. No time for doing anything else*/
        if (this_rq->avg_idle < sysctl_sched_migration_cost)
                return;

should have its comment updated to something like:

        /*
         * Will wake up very soon. Blocked load will be updated
         * periodically, no need to wake an idle CPU.
         */

given kick_ilb() isn't the costliest of things.

> -	raw_spin_unlock(&this_rq->lock);
>       /*
> -	 * This CPU is going to be idle and blocked load of idle CPUs
> -	 * need to be updated. Run the ilb locally as it is a good
> -	 * candidate for ilb instead of waking up another idle CPU.
> -	 * Kick an normal ilb if we failed to do the update.
> +	 * Blocked load of idle CPUs need to be updated.
> +	 * Kick an ILB to update statistics.
>        */
> -	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
> -		kick_ilb(NOHZ_STATS_KICK);
> -	raw_spin_lock(&this_rq->lock);

With this change, the return value of _nohz_idle_balance() is no longer
used. This means we could get rid of the tracking of whether it iterated
over all nohz CPUs or not.

> +	kick_ilb(NOHZ_STATS_KICK);
>  }
>
>  #else /* !CONFIG_NO_HZ_COMMON */

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB
  2021-02-05 11:48 ` [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB Vincent Guittot
@ 2021-02-09 13:09   ` Valentin Schneider
  0 siblings, 0 replies; 21+ messages in thread
From: Valentin Schneider @ 2021-02-09 13:09 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
	rostedt, bsegall, mgorman, fweisbec, tglx, bristot, linux-kernel,
	joel
  Cc: qais.yousef, Vincent Guittot

On 05/02/21 12:48, Vincent Guittot wrote:
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 60b8c1c68ab9..c587af230010 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -10043,22 +10043,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
>        * When the cpu is attached to null domain for ex, it will not be
>        * updated.
>        */
> -	if (likely(update_next_balance)) {
> +	if (likely(update_next_balance))
>               rq->next_balance = next_balance;
>
> -#ifdef CONFIG_NO_HZ_COMMON
> -		/*
> -		 * If this CPU has been elected to perform the nohz idle
> -		 * balance. Other idle CPUs have already rebalanced with
> -		 * nohz_idle_balance() and nohz.next_balance has been
> -		 * updated accordingly. This CPU is now running the idle load
> -		 * balance for itself and we need to update the
> -		 * nohz.next_balance accordingly.
> -		 */
> -		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
> -			nohz.next_balance = rq->next_balance;
> -#endif
> -	}

I was never fond of this bit, so FWIW I'm happy to see it go!

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-05 11:48 ` [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu Vincent Guittot
@ 2021-02-09 13:09   ` Valentin Schneider
  2021-02-09 13:57     ` Vincent Guittot
  2021-02-09 13:47   ` Dietmar Eggemann
  1 sibling, 1 reply; 21+ messages in thread
From: Valentin Schneider @ 2021-02-09 13:09 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, dietmar.eggemann,
	rostedt, bsegall, mgorman, fweisbec, tglx, bristot, linux-kernel,
	joel
  Cc: qais.yousef, Vincent Guittot

On 05/02/21 12:48, Vincent Guittot wrote:
> Instead of waking up a random and already idle CPU, we can take advantage
> of this_cpu being about to enter idle to run the ILB and update the
> blocked load.
>
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  include/linux/sched/nohz.h |  2 ++
>  kernel/sched/fair.c        | 11 ++++++++---
>  kernel/sched/idle.c        |  6 ++++++
>  3 files changed, 16 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
> index 6d67e9a5af6b..74cdc4e87310 100644
> --- a/include/linux/sched/nohz.h
> +++ b/include/linux/sched/nohz.h
> @@ -9,8 +9,10 @@
>  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
>  extern void nohz_balance_enter_idle(int cpu);
>  extern int get_nohz_timer_target(void);
> +extern void nohz_run_idle_balance(int cpu);
>  #else
>  static inline void nohz_balance_enter_idle(int cpu) { }
> +static inline void nohz_run_idle_balance(int cpu) { }
>  #endif
>
>  #ifdef CONFIG_NO_HZ_COMMON
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 935594cd5430..3d2ab28d5736 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -10461,6 +10461,11 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
>       return true;
>  }
>
> +void nohz_run_idle_balance(int cpu)
> +{
> +	nohz_idle_balance(cpu_rq(cpu), CPU_IDLE);
> +}
> +
>  static void nohz_newidle_balance(struct rq *this_rq)
>  {
>       int this_cpu = this_rq->cpu;
> @@ -10482,10 +10487,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
>               return;
>
>       /*
> -	 * Blocked load of idle CPUs need to be updated.
> -	 * Kick an ILB to update statistics.
> +	 * Set the need to trigger ILB in order to update blocked load
> +	 * before entering idle state.
>        */
> -	kick_ilb(NOHZ_STATS_KICK);
> +	this_rq->nohz_idle_balance = NOHZ_STATS_KICK;
>  }
>
>  #else /* !CONFIG_NO_HZ_COMMON */
> diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> index 305727ea0677..52a4e9ce2f9b 100644
> --- a/kernel/sched/idle.c
> +++ b/kernel/sched/idle.c
> @@ -261,6 +261,12 @@ static void cpuidle_idle_call(void)
>  static void do_idle(void)
>  {
>       int cpu = smp_processor_id();
> +
> +	/*
> +	 * Check if we need to update some blocked load
> +	 */
> +	nohz_run_idle_balance(cpu);
> +

What do we gain from doing this here vs having a stats update in
newidle_balance()?

The current approach is to have a combined load_balance() + blocked load
update during newidle, and I get that this can take too long. But then,
we could still have what you're adding to do_idle() in the tail of
newidle_balance() itself, no? i.e.

  newidle_balance()
    ...
    for_each_domain(this_cpu, sd) {
       ...
       pulled_task = load_balance(...);
       ...
    }
    ...
    if (!pulled_task && !this_rq->nr_running) {
      this_rq->nohz_idle_balance = NOHZ_STATS_KICK;
      _nohz_idle_balance();
    }

or somesuch.

>       /*
>        * If the arch has a polling bit, we maintain an invariant:
>        *
> --
> 2.17.1

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance
  2021-02-09 13:09   ` Valentin Schneider
@ 2021-02-09 13:20     ` Vincent Guittot
  0 siblings, 0 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-09 13:20 UTC (permalink / raw)
  To: Valentin Schneider
  Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Dietmar Eggemann,
	Steven Rostedt, Ben Segall, Mel Gorman, Frederic Weisbecker,
	Thomas Gleixner, Daniel Bristot de Oliveira, linux-kernel,
	Joel Fernandes, Qais Yousef

On Tue, 9 Feb 2021 at 14:09, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>
> On 05/02/21 12:48, Vincent Guittot wrote:
> > @@ -10517,16 +10499,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
> >           time_before(jiffies, READ_ONCE(nohz.next_blocked)))
> >               return;
> >
>
> I was wondering whether all the conditions above were still relevant. I
> think they are, but this one:
>
>         /* Will wake up very soon. No time for doing anything else*/
>         if (this_rq->avg_idle < sysctl_sched_migration_cost)
>                 return;
>
> should have its comment updated to something like:
>
>         /*
>          * Will wake up very soon. Blocked load will be updated
>          * periodically, no need to wake an idle CPU.
>          */
>
> given kick_ilb() isn't the costliest of things.
>
> > -     raw_spin_unlock(&this_rq->lock);
> >       /*
> > -      * This CPU is going to be idle and blocked load of idle CPUs
> > -      * need to be updated. Run the ilb locally as it is a good
> > -      * candidate for ilb instead of waking up another idle CPU.
> > -      * Kick an normal ilb if we failed to do the update.
> > +      * Blocked load of idle CPUs need to be updated.
> > +      * Kick an ILB to update statistics.
> >        */
> > -     if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
> > -             kick_ilb(NOHZ_STATS_KICK);
> > -     raw_spin_lock(&this_rq->lock);
>
> With this change, the return value of _nohz_idle_balance() is no longer
> used. This means we could get rid of the tracking of whether it iterated
> over all nohz CPUs or not.

Yeah, the return is useless now

>
> > +     kick_ilb(NOHZ_STATS_KICK);
> >  }
> >
> >  #else /* !CONFIG_NO_HZ_COMMON */

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance
  2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
  2021-02-09 13:09   ` Valentin Schneider
@ 2021-02-09 13:44   ` Dietmar Eggemann
  1 sibling, 0 replies; 21+ messages in thread
From: Dietmar Eggemann @ 2021-02-09 13:44 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef

On 05/02/2021 12:48, Vincent Guittot wrote:
> newidle_balance runs with both preempt and irq disabled which prevent
> local irq to run during this period. The duration for updating of the
> blocked load of CPUs varies according to the number of cgroups and

Maybe s/number of cgroups/number of CPU cgroups with non-decayed
cfs_rq's (i.e. cfs_rq within the leaf cfs_rq list)

> extends this critical period to an uncontrolled level.
> 
> Remove the update from newidle_balance and trigger a normal ILB that
> will take care of the update instead.
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

otherwise, LGTM.

[...]

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats
  2021-02-05 11:48 ` [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats Vincent Guittot
@ 2021-02-09 13:45   ` Dietmar Eggemann
  2021-02-09 17:38     ` Vincent Guittot
  0 siblings, 1 reply; 21+ messages in thread
From: Dietmar Eggemann @ 2021-02-09 13:45 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef

On 05/02/2021 12:48, Vincent Guittot wrote:
> idle load balance is the only user of update_nohz_stats and doesn't use
> force parameter. Remove it

Wasn't the 'force=true' from ilb eclipsing the jiffy resolution rate
limiting '!time_after(jiffies, rq->last_blocked_load_update_tick)' of
update_blocked_averages()?

So IMHO this has the (maybe intended) side effect that formerly forced
updates are now rate limited to a one-jiffy resolution too.

> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  kernel/sched/fair.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index bfe1e235fe01..60b8c1c68ab9 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8352,7 +8352,7 @@ group_type group_classify(unsigned int imbalance_pct,
>  	return group_has_spare;
>  }
>  
> -static bool update_nohz_stats(struct rq *rq, bool force)
> +static bool update_nohz_stats(struct rq *rq)
>  {
>  #ifdef CONFIG_NO_HZ_COMMON
>  	unsigned int cpu = rq->cpu;
> @@ -8363,7 +8363,7 @@ static bool update_nohz_stats(struct rq *rq, bool force)
>  	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
>  		return false;
>  
> -	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
> +	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
>  		return true;
>  
>  	update_blocked_averages(cpu);
> @@ -10404,7 +10404,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
>  
>  		rq = cpu_rq(balance_cpu);
>  
> -		has_blocked_load |= update_nohz_stats(rq, true);
> +		has_blocked_load |= update_nohz_stats(rq);
>  
>  		/*
>  		 * If time for next balance is due,
> 


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test
  2021-02-05 11:48 ` [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test Vincent Guittot
@ 2021-02-09 13:46   ` Dietmar Eggemann
  0 siblings, 0 replies; 21+ messages in thread
From: Dietmar Eggemann @ 2021-02-09 13:46 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef

On 05/02/2021 12:48, Vincent Guittot wrote:
> Reorder the tests and skip useless ones when no load balance has
> been performed.

LGTM.

But IMHO the reason why those two if conditions can be skipped for the
'goto out' path is that we don't release the rq lock, rather than the
actual lb. Might be worth saying this in the patch header? It's already
mentioned on top of the first if condition though.

> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  kernel/sched/fair.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index c587af230010..935594cd5430 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -10592,7 +10592,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
>  	if (curr_cost > this_rq->max_idle_balance_cost)
>  		this_rq->max_idle_balance_cost = curr_cost;
>  
> -out:
>  	/*
>  	 * While browsing the domains, we released the rq lock, a task could
>  	 * have been enqueued in the meantime. Since we're not going idle,
> @@ -10601,14 +10600,15 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
>  	if (this_rq->cfs.h_nr_running && !pulled_task)
>  		pulled_task = 1;
>  
> -	/* Move the next balance forward */
> -	if (time_after(this_rq->next_balance, next_balance))
> -		this_rq->next_balance = next_balance;
> -
>  	/* Is there a task of a high priority class? */
>  	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
>  		pulled_task = -1;
>  
> +out:
> +	/* Move the next balance forward */
> +	if (time_after(this_rq->next_balance, next_balance))
> +		this_rq->next_balance = next_balance;
> +
>  	if (pulled_task)
>  		this_rq->idle_stamp = 0;
>  	else
> 


^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-05 11:48 ` [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu Vincent Guittot
  2021-02-09 13:09   ` Valentin Schneider
@ 2021-02-09 13:47   ` Dietmar Eggemann
  2021-02-09 14:22     ` Vincent Guittot
  1 sibling, 1 reply; 21+ messages in thread
From: Dietmar Eggemann @ 2021-02-09 13:47 UTC (permalink / raw)
  To: Vincent Guittot, mingo, peterz, juri.lelli, rostedt, bsegall,
	mgorman, fweisbec, tglx, bristot, linux-kernel, joel
  Cc: qais.yousef

On 05/02/2021 12:48, Vincent Guittot wrote:
> Instead of waking up a random and already idle CPU, we can take advantage
> of this_cpu being about to enter idle to run the ILB and update the
> blocked load.
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  include/linux/sched/nohz.h |  2 ++
>  kernel/sched/fair.c        | 11 ++++++++---
>  kernel/sched/idle.c        |  6 ++++++
>  3 files changed, 16 insertions(+), 3 deletions(-)
> 
> diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
> index 6d67e9a5af6b..74cdc4e87310 100644
> --- a/include/linux/sched/nohz.h
> +++ b/include/linux/sched/nohz.h
> @@ -9,8 +9,10 @@
>  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
>  extern void nohz_balance_enter_idle(int cpu);
>  extern int get_nohz_timer_target(void);
> +extern void nohz_run_idle_balance(int cpu);
>  #else
>  static inline void nohz_balance_enter_idle(int cpu) { }
> +static inline void nohz_run_idle_balance(int cpu) { }
>  #endif

(1) Since nohz_run_idle_balance() would be an interface one sched class
(fair) exports to another (idle) I wonder if kernel/sched/sched.h would
be the more appropriate include file to export/define it?

nohz_balance_exit_idle() is exported via kernel/sched/sched.h (used only
within the scheduler) whereas nohz_balance_enter_idle() is exported via
include/linux/sched/nohz.h (used in kernel/time/tick-sched.c).

Isn't include/linux/sched/nohz.h the interface between kernel/sched/ and
kernel/time?

There is one exception already though: calc_load_nohz_remote() defined
in kernel/sched/loadavg.c and (only) used in kernel/sched/core.c.


(2) Is there a need for an extra function nohz_run_idle_balance()?
do_idle() could call nohz_idle_balance() directly in case in would be
exported instead.

[...]

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-09 13:09   ` Valentin Schneider
@ 2021-02-09 13:57     ` Vincent Guittot
  2021-02-09 17:25       ` Valentin Schneider
  0 siblings, 1 reply; 21+ messages in thread
From: Vincent Guittot @ 2021-02-09 13:57 UTC (permalink / raw)
  To: Valentin Schneider
  Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Dietmar Eggemann,
	Steven Rostedt, Ben Segall, Mel Gorman, Frederic Weisbecker,
	Thomas Gleixner, Daniel Bristot de Oliveira, linux-kernel,
	Joel Fernandes, Qais Yousef

On Tue, 9 Feb 2021 at 14:09, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>
> On 05/02/21 12:48, Vincent Guittot wrote:
> > Instead of waking up a random and already idle CPU, we can take advantage
> > of this_cpu being about to enter idle to run the ILB and update the
> > blocked load.
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> >  include/linux/sched/nohz.h |  2 ++
> >  kernel/sched/fair.c        | 11 ++++++++---
> >  kernel/sched/idle.c        |  6 ++++++
> >  3 files changed, 16 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
> > index 6d67e9a5af6b..74cdc4e87310 100644
> > --- a/include/linux/sched/nohz.h
> > +++ b/include/linux/sched/nohz.h
> > @@ -9,8 +9,10 @@
> >  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
> >  extern void nohz_balance_enter_idle(int cpu);
> >  extern int get_nohz_timer_target(void);
> > +extern void nohz_run_idle_balance(int cpu);
> >  #else
> >  static inline void nohz_balance_enter_idle(int cpu) { }
> > +static inline void nohz_run_idle_balance(int cpu) { }
> >  #endif
> >
> >  #ifdef CONFIG_NO_HZ_COMMON
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 935594cd5430..3d2ab28d5736 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -10461,6 +10461,11 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
> >       return true;
> >  }
> >
> > +void nohz_run_idle_balance(int cpu)
> > +{
> > +     nohz_idle_balance(cpu_rq(cpu), CPU_IDLE);
> > +}
> > +
> >  static void nohz_newidle_balance(struct rq *this_rq)
> >  {
> >       int this_cpu = this_rq->cpu;
> > @@ -10482,10 +10487,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
> >               return;
> >
> >       /*
> > -      * Blocked load of idle CPUs need to be updated.
> > -      * Kick an ILB to update statistics.
> > +      * Set the need to trigger ILB in order to update blocked load
> > +      * before entering idle state.
> >        */
> > -     kick_ilb(NOHZ_STATS_KICK);
> > +     this_rq->nohz_idle_balance = NOHZ_STATS_KICK;
> >  }
> >
> >  #else /* !CONFIG_NO_HZ_COMMON */
> > diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> > index 305727ea0677..52a4e9ce2f9b 100644
> > --- a/kernel/sched/idle.c
> > +++ b/kernel/sched/idle.c
> > @@ -261,6 +261,12 @@ static void cpuidle_idle_call(void)
> >  static void do_idle(void)
> >  {
> >       int cpu = smp_processor_id();
> > +
> > +     /*
> > +      * Check if we need to update some blocked load
> > +      */
> > +     nohz_run_idle_balance(cpu);
> > +
>
> What do we gain from doing this here vs having a stats update in
> newidle_balance()?

As mentioned by Joel, newidle_balance is called in the schedule
context with preempt and irq off, which prevents any local activity
like irq/timer. Whereas in this new place, we have the same condition
as during ILB with only preempt off, and _nohz_idle_balance() regularly
checks if it has to abort because something has been scheduled on the
cpu.


>
> The current approach is to have a combined load_balance() + blocked load
> update during newidle, and I get that this can take too long. But then,
> we could still have what you're adding to do_idle() in the tail of
> newidle_balance() itself, no? i.e.
>
>   newidle_balance()
>     ...
>     for_each_domain(this_cpu, sd) {
>        ...
>        pulled_task = load_balance(...);
>        ...
>     }
>     ...
>     if (!pulled_task && !this_rq->nr_running) {
>       this_rq->nohz_idle_balance = NOHZ_STATS_KICK;
>       _nohz_idle_balance();
>     }
>
> or somesuch.
>
> >       /*
> >        * If the arch has a polling bit, we maintain an invariant:
> >        *
> > --
> > 2.17.1

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-09 13:47   ` Dietmar Eggemann
@ 2021-02-09 14:22     ` Vincent Guittot
  0 siblings, 0 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-09 14:22 UTC (permalink / raw)
  To: Dietmar Eggemann
  Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Steven Rostedt,
	Ben Segall, Mel Gorman, Frederic Weisbecker, Thomas Gleixner,
	Daniel Bristot de Oliveira, linux-kernel, Joel Fernandes,
	Qais Yousef

On Tue, 9 Feb 2021 at 14:47, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> On 05/02/2021 12:48, Vincent Guittot wrote:
> > Instead of waking up a random and already idle CPU, we can take advantage
> > of this_cpu being about to enter idle to run the ILB and update the
> > blocked load.
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> >  include/linux/sched/nohz.h |  2 ++
> >  kernel/sched/fair.c        | 11 ++++++++---
> >  kernel/sched/idle.c        |  6 ++++++
> >  3 files changed, 16 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
> > index 6d67e9a5af6b..74cdc4e87310 100644
> > --- a/include/linux/sched/nohz.h
> > +++ b/include/linux/sched/nohz.h
> > @@ -9,8 +9,10 @@
> >  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
> >  extern void nohz_balance_enter_idle(int cpu);
> >  extern int get_nohz_timer_target(void);
> > +extern void nohz_run_idle_balance(int cpu);
> >  #else
> >  static inline void nohz_balance_enter_idle(int cpu) { }
> > +static inline void nohz_run_idle_balance(int cpu) { }
> >  #endif
>
> (1) Since nohz_run_idle_balance() would be an interface one sched class
> (fair) exports to another (idle) I wonder if kernel/sched/sched.h would
> be the more appropriate include file to export/define it?

Yes probably. I have been influenced by the "nohz" filename but
kernel/sched/sched.h is better

>
> nohz_balance_exit_idle() is exported via kernel/sched/sched.h (used only
> within the scheduler) whereas nohz_balance_enter_idle() is exported via
> include/linux/sched/nohz.h (used in kernel/time/tick-sched.c).
>
> Isn't include/linux/sched/nohz.h the interface between kernel/sched/ and
> kernel/time?
>
> There is one exception already though: calc_load_nohz_remote() defined
> in kernel/sched/loadavg.c and (only) used in kernel/sched/core.c.
>
>
> (2) Is there a need for an extra function nohz_run_idle_balance()?
> do_idle() could call nohz_idle_balance() directly in case in would be
> exported instead.

I didn't want to expose the 2 parameters of nohz_idle_balance in
do_idle() and especially the enum cpu_idle_type but it seems that it
is already available so I can probably call
nohz_idle_balance(cpu_rq(cpu), CPU_IDLE); directly


>
> [...]

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu
  2021-02-09 13:57     ` Vincent Guittot
@ 2021-02-09 17:25       ` Valentin Schneider
  0 siblings, 0 replies; 21+ messages in thread
From: Valentin Schneider @ 2021-02-09 17:25 UTC (permalink / raw)
  To: Vincent Guittot
  Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Dietmar Eggemann,
	Steven Rostedt, Ben Segall, Mel Gorman, Frederic Weisbecker,
	Thomas Gleixner, Daniel Bristot de Oliveira, linux-kernel,
	Joel Fernandes, Qais Yousef

On 09/02/21 14:57, Vincent Guittot wrote:
> On Tue, 9 Feb 2021 at 14:09, Valentin Schneider
> <valentin.schneider@arm.com> wrote:
>> On 05/02/21 12:48, Vincent Guittot wrote:
>> > @@ -261,6 +261,12 @@ static void cpuidle_idle_call(void)
>> >  static void do_idle(void)
>> >  {
>> >       int cpu = smp_processor_id();
>> > +
>> > +     /*
>> > +      * Check if we need to update some blocked load
>> > +      */
>> > +     nohz_run_idle_balance(cpu);
>> > +
>>
>> What do we gain from doing this here vs having a stats update in
>> newidle_balance()?
>
> As mentioned by Joel, newidle_balance is called in the schedule
> context with preempt and irq off  which prevent any local activity
> like irq/timer. Whereas in this new place, we have the same condition
> as during ILB with only preemptoff and _nohz_idle_balance() regularly
> checks if it has to abort because something has been scheduled on the
> cpu.
>

Gotcha, that's already hinted at in the cover letter. Could you point this
out in the changelog? Other than that, I don't see anything wrong with this
approach.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats
  2021-02-09 13:45   ` Dietmar Eggemann
@ 2021-02-09 17:38     ` Vincent Guittot
  0 siblings, 0 replies; 21+ messages in thread
From: Vincent Guittot @ 2021-02-09 17:38 UTC (permalink / raw)
  To: Dietmar Eggemann
  Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Steven Rostedt,
	Ben Segall, Mel Gorman, Frederic Weisbecker, Thomas Gleixner,
	Daniel Bristot de Oliveira, linux-kernel, Joel Fernandes,
	Qais Yousef

On Tue, 9 Feb 2021 at 14:45, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> On 05/02/2021 12:48, Vincent Guittot wrote:
> > idle load balance is the only user of update_nohz_stats and doesn't use
> > the force parameter. Remove it.
>
> Wasn't the 'force=true' from ilb eclipsing the jiffy resolution rate
> limiting '!time_after(jiffies, rq->last_blocked_load_update_tick)' of
> update_blocked_averages()?
>
> So IMHO this has the (maybe intended) side effect that (formerly forced
> updates) are now rate limited on one jiffy resolution too.

Calls to _nohz_idle_balance were already rate limited by load balance
interval and nohz.next_blocked.
This tick rate limit was originally added for the newidle_balance
case but there were some corner cases for _nohz_idle_balance that
could benefit from this too

>
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> > ---
> >  kernel/sched/fair.c | 6 +++---
> >  1 file changed, 3 insertions(+), 3 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index bfe1e235fe01..60b8c1c68ab9 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -8352,7 +8352,7 @@ group_type group_classify(unsigned int imbalance_pct,
> >       return group_has_spare;
> >  }
> >
> > -static bool update_nohz_stats(struct rq *rq, bool force)
> > +static bool update_nohz_stats(struct rq *rq)
> >  {
> >  #ifdef CONFIG_NO_HZ_COMMON
> >       unsigned int cpu = rq->cpu;
> > @@ -8363,7 +8363,7 @@ static bool update_nohz_stats(struct rq *rq, bool force)
> >       if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
> >               return false;
> >
> > -     if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
> > +     if (!time_after(jiffies, rq->last_blocked_load_update_tick))
> >               return true;
> >
> >       update_blocked_averages(cpu);
> > @@ -10404,7 +10404,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
> >
> >               rq = cpu_rq(balance_cpu);
> >
> > -             has_blocked_load |= update_nohz_stats(rq, true);
> > +             has_blocked_load |= update_nohz_stats(rq);
> >
> >               /*
> >                * If time for next balance is due,
> >
>

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2021-02-09 17:40 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-05 11:48 [PATCH 0/6] move update blocked load outside newidle_balance Vincent Guittot
2021-02-05 11:48 ` [PATCH 1/6] sched/fair: remove update of blocked load from newidle_balance Vincent Guittot
2021-02-09 13:09   ` Valentin Schneider
2021-02-09 13:20     ` Vincent Guittot
2021-02-09 13:44   ` Dietmar Eggemann
2021-02-05 11:48 ` [PATCH 2/6] sched/fair: remove unused parameter of update_nohz_stats Vincent Guittot
2021-02-09 13:45   ` Dietmar Eggemann
2021-02-09 17:38     ` Vincent Guittot
2021-02-05 11:48 ` [PATCH 3/6] sched/fair: merge for each idle cpu loop of ILB Vincent Guittot
2021-02-09 13:09   ` Valentin Schneider
2021-02-05 11:48 ` [PATCH 4/6] sched/fair: reorder newidle_balance pulled_task test Vincent Guittot
2021-02-09 13:46   ` Dietmar Eggemann
2021-02-05 11:48 ` [RFC PATCH 5/6] sched/fair: trigger the update of blocked load on newly idle cpu Vincent Guittot
2021-02-09 13:09   ` Valentin Schneider
2021-02-09 13:57     ` Vincent Guittot
2021-02-09 17:25       ` Valentin Schneider
2021-02-09 13:47   ` Dietmar Eggemann
2021-02-09 14:22     ` Vincent Guittot
2021-02-05 11:48 ` [PATCH 6/6] sched/fair: reduce the window for duplicated update Vincent Guittot
2021-02-05 15:13   ` kernel test robot
2021-02-05 16:13   ` [PATCH 6/6 v2] " Vincent Guittot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.