From: Scott Wood <swood@redhat.com>
To: Steven Rostedt <rostedt@goodmis.org>,
Ingo Molnar <mingo@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>,
Rik van Riel <riel@surriel.com>, Mel Gorman <mgorman@suse.de>,
linux-kernel@vger.kernel.org,
linux-rt-users <linux-rt-users@vger.kernel.org>,
Scott Wood <swood@redhat.com>
Subject: [RFC PATCH 1/3] sched/fair: Call newidle_balance() from finish_task_switch()
Date: Tue, 28 Apr 2020 00:02:40 -0500
Message-ID: <20200428050242.17717-2-swood@redhat.com>
In-Reply-To: <20200428050242.17717-1-swood@redhat.com>
Move the call to newidle_balance() out of the pick_next_task() path and
into finish_task_switch(), invoking it once the CPU has actually switched
to the idle task. Thus, newidle_balance() is entered with interrupts
enabled, which allows (in the next patch) enabling interrupts when the
lock is dropped.
Signed-off-by: Scott Wood <swood@redhat.com>
---
kernel/sched/core.c | 7 ++++---
kernel/sched/fair.c | 45 ++++++++++++++++----------------------------
kernel/sched/sched.h | 6 ++----
3 files changed, 22 insertions(+), 36 deletions(-)
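For readers skimming the diff, a condensed sketch of the resulting flow,
pieced together from the hunks below (heavily elided, not compilable
as-is; all names come from the code this patch touches):

static struct rq *finish_task_switch(struct task_struct *prev)
{
	/* ... */
	tick_nohz_task_switch();

	/*
	 * Past the context switch, interrupts are enabled again.  If we
	 * just switched to the idle task, look for work to pull.
	 */
	if (is_idle_task(current))
		newidle_balance();

	return rq;
}

int newidle_balance(void)
{
	struct rq *this_rq;
	int pulled_task = 0;

	/*
	 * No rq/rf arguments any more: pin to this CPU and take the rq
	 * lock here, so the function is entered with interrupts enabled
	 * rather than inheriting a held rq->lock with IRQs off.
	 */
	preempt_disable();
	this_rq = this_rq();
	local_bh_disable();
	raw_spin_lock_irq(&this_rq->lock);

	/* ... existing idle-balancing logic ... */

	raw_spin_unlock_irq(&this_rq->lock);
	local_bh_enable();
	preempt_enable();
	return pulled_task;
}

A side effect visible in the diff: the RETRY_TASK mechanism in
pick_next_task() goes away, because newidle_balance() no longer runs
inside the pick, so there is no longer a window there where rq->lock is
dropped and a higher-priority task can appear mid-pick.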
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a2fbf98fd6f..0294beb8d16c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3241,6 +3241,10 @@ static struct rq *finish_task_switch(struct task_struct *prev)
}
tick_nohz_task_switch();
+
+ if (is_idle_task(current))
+ newidle_balance();
+
return rq;
}
@@ -3919,8 +3923,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq->nr_running == rq->cfs.h_nr_running)) {
p = pick_next_task_fair(rq, prev, rf);
- if (unlikely(p == RETRY_TASK))
- goto restart;
/* Assumes fair_sched_class->next == idle_sched_class */
if (!p) {
@@ -3931,7 +3933,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
return p;
}
-restart:
#ifdef CONFIG_SMP
/*
* We must do the balancing pass before put_next_task(), such
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 02f323b85b6d..74c3c5280d6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6758,8 +6758,6 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
if (rq->nr_running)
return 1;
-
- return newidle_balance(rq, rf) != 0;
}
#endif /* CONFIG_SMP */
@@ -6934,9 +6932,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
struct task_struct *p;
- int new_tasks;
-again:
if (!sched_fair_runnable(rq))
goto idle;
@@ -7050,19 +7046,6 @@ done: __maybe_unused;
if (!rf)
return NULL;
- new_tasks = newidle_balance(rq, rf);
-
- /*
- * Because newidle_balance() releases (and re-acquires) rq->lock, it is
- * possible for any higher priority task to appear. In that case we
- * must re-start the pick_next_entity() loop.
- */
- if (new_tasks < 0)
- return RETRY_TASK;
-
- if (new_tasks > 0)
- goto again;
-
/*
* rq is about to be idle, check if we need to update the
* lost_idle_time of clock_pelt
@@ -10425,14 +10408,23 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
* 0 - failed, no new tasks
* > 0 - success, new (fair) tasks present
*/
-int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+int newidle_balance(void)
{
unsigned long next_balance = jiffies + HZ;
- int this_cpu = this_rq->cpu;
+ int this_cpu;
struct sched_domain *sd;
+ struct rq *this_rq;
int pulled_task = 0;
u64 curr_cost = 0;
+ preempt_disable();
+ this_rq = this_rq();
+ this_cpu = this_rq->cpu;
+ local_bh_disable();
+ raw_spin_lock_irq(&this_rq->lock);
+
+ update_rq_clock(this_rq);
+
update_misfit_status(NULL, this_rq);
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
@@ -10444,15 +10436,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
* Do not pull tasks towards !active CPUs...
*/
if (!cpu_active(this_cpu))
- return 0;
-
- /*
- * This is OK, because current is on_cpu, which avoids it being picked
- * for load-balance and preemption/IRQs are still disabled avoiding
- * further scheduler activity on it and we're being very careful to
- * re-start the picking loop.
- */
- rq_unpin_lock(this_rq, rf);
+ goto out_unlock;
if (this_rq->avg_idle < sysctl_sched_migration_cost ||
!READ_ONCE(this_rq->rd->overload)) {
@@ -10534,7 +10518,10 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
if (pulled_task)
this_rq->idle_stamp = 0;
- rq_repin_lock(this_rq, rf);
+out_unlock:
+ raw_spin_unlock_irq(&this_rq->lock);
+ local_bh_enable();
+ preempt_enable();
return pulled_task;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index db3a57675ccf..3d97c51544d7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1504,13 +1504,13 @@ static inline void unregister_sched_domain_sysctl(void)
}
#endif
-extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
+extern int newidle_balance(void);
#else
static inline void sched_ttwu_pending(void) { }
-static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
+static inline int newidle_balance(void) { return 0; }
#endif /* CONFIG_SMP */
@@ -1742,8 +1742,6 @@ extern const u32 sched_prio_to_wmult[40];
#define ENQUEUE_MIGRATED 0x00
#endif
-#define RETRY_TASK ((void *)-1UL)
-
struct sched_class {
const struct sched_class *next;
--
2.18.2