From: "Jan H. Schönherr" <jschoenh@amazon.de>
To: Ingo Molnar <mingo@redhat.com>, Peter Zijlstra <peterz@infradead.org>
Cc: "Jan H. Schönherr" <jschoenh@amazon.de>, linux-kernel@vger.kernel.org
Subject: [RFC 54/60] cosched: Support idling in a coscheduled set
Date: Fri,  7 Sep 2018 23:40:41 +0200
Message-ID: <20180907214047.26914-55-jschoenh@amazon.de>
In-Reply-To: <20180907214047.26914-1-jschoenh@amazon.de>

If a coscheduled set is partly idle, some CPUs *must* do nothing, even
if they have other runnable tasks (in other coscheduled sets). This
forced-idle mode must work similarly to normal task execution; for
example, not just any task is allowed to replace the forced-idle task.

Lay the groundwork for this by introducing general helper functions
to enter and leave forced-idle mode.
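
To illustrate the intended use, here is a rough sketch of a pick path
entering forced idle. The function name is made up for illustration;
the real call sites arrive with the task selection changes later in
this series:

	static struct task_struct *
	pick_or_force_idle(struct rq *rq, struct sched_entity *sd_se)
	{
		/* Tasks are runnable below this SD-SE: pick as usual. */
		if (cfs_rq_of(sd_se)->nr_running)
			return pick_next_task_fair(rq, NULL, NULL);

		/*
		 * Nothing to run below the SD-SE, but the coscheduled set
		 * forbids running anything else: remember the SD-SE and
		 * switch to the idle task.
		 */
		cosched_set_idle(rq, sd_se);
		return rq->idle;
	}

Leaving forced idle is the mirror image: put_prev_task_fair() fetches
the remembered SE via cosched_get_and_clear_idle_se() and puts it like
any other entity.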

Whenever we are in forced idle, we execute the normal idle task, but
forward many decisions to the fair scheduling class. The functions in
the fair scheduling class are made aware of the forced-idle mode and
base their decisions on the (SD-)SE below which no tasks were found.
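
The recurring pattern in the fair path is condensed below; each hook
substitutes the remembered (SD-)SE for the idle task's own SE before
making a decision:

	struct sched_entity *se = &curr->se;

	if (cosched_is_idle(rq, curr))
		se = cosched_get_idle_se(rq);

	/* ... decide based on cfs_rq_of(se), sched_slice(), ... */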

Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
---
 kernel/sched/core.c  | 11 +++++++----
 kernel/sched/fair.c  | 43 +++++++++++++++++++++++++++++++++-------
 kernel/sched/idle.c  |  7 ++++++-
 kernel/sched/sched.h | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 104 insertions(+), 12 deletions(-)
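
A note on the !CONFIG_COSCHEDULING stubs in sched.h below: they contain
BUILD_BUG(), which is safe because cosched_is_idle() is constant false
in that configuration, so every call to the other helpers sits in a
dead branch that the compiler eliminates, for example:

	if (cosched_is_idle(rq, curr))		/* constant false */
		se = cosched_get_idle_se(rq);	/* eliminated; BUILD_BUG() never expands */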

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b3ff885a88d4..75de3b83a8c6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -856,13 +856,16 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-	const struct sched_class *class;
+	const struct sched_class *class, *curr_class = rq->curr->sched_class;
+
+	if (cosched_is_idle(rq, rq->curr))
+		curr_class = &fair_sched_class;
 
-	if (p->sched_class == rq->curr->sched_class) {
-		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	if (p->sched_class == curr_class) {
+		curr_class->check_preempt_curr(rq, p, flags);
 	} else {
 		for_each_class(class) {
-			if (class == rq->curr->sched_class)
+			if (class == curr_class)
 				break;
 			if (class == p->sched_class) {
 				resched_curr(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 210fcd534917..9e8b8119cdea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5206,12 +5206,14 @@ static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	if (cosched_is_idle(rq, p))
+		se = cosched_get_idle_se(rq);
 
 	SCHED_WARN_ON(task_rq(p) != rq);
 
 	if (nr_cfs_tasks(rq) > 1) {
-		u64 slice = sched_slice(cfs_rq, se);
+		u64 slice = sched_slice(cfs_rq_of(se), se);
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		s64 delta = slice - ran;
 
@@ -5232,11 +5234,17 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 static void hrtick_update(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
+	struct sched_entity *se = &curr->se;
+
+	if (!hrtick_enabled(rq))
+		return;
 
-	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
+	if (cosched_is_idle(rq, curr))
+		se = cosched_get_idle_se(rq);
+	else if (curr->sched_class != &fair_sched_class)
 		return;
 
-	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+	if (cfs_rq_of(se)->nr_running < sched_nr_latency)
 		hrtick_start_fair(rq, curr);
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -6802,13 +6810,20 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-	int scale = cfs_rq->nr_running >= sched_nr_latency;
 	int next_buddy_marked = 0;
+	struct cfs_rq *cfs_rq;
+	int scale;
+
+	/* FIXME: locking may be off after fetching the idle_se */
+	if (cosched_is_idle(rq, curr))
+		se = cosched_get_idle_se(rq);
 
 	if (unlikely(se == pse))
 		return;
 
+	cfs_rq = cfs_rq_of(se);
+	scale = cfs_rq->nr_running >= sched_nr_latency;
+
 	/*
 	 * This is possible from callers such as attach_tasks(), in which we
 	 * unconditionally check_prempt_curr() after an enqueue (which may have
@@ -7038,7 +7053,15 @@ void put_prev_entity_fair(struct rq *rq, struct sched_entity *se)
  */
 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 {
-	put_prev_entity_fair(rq, &prev->se);
+	struct sched_entity *se = &prev->se;
+
+	if (cosched_is_idle(rq, prev)) {
+		se = cosched_get_and_clear_idle_se(rq);
+		if (__leader_of(se) != cpu_of(rq))
+			return;
+	}
+
+	put_prev_entity_fair(rq, se);
 }
 
 /*
@@ -9952,6 +9975,12 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 	struct sched_entity *se = &curr->se;
 	struct rq_owner_flags orf;
 
+	if (cosched_is_idle(rq, curr)) {
+		se = cosched_get_idle_se(rq);
+		if (__leader_of(se) != cpu_of(rq))
+			return;
+	}
+
 	rq_lock_owned(rq, &orf);
 	for_each_owned_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 16f84142f2f4..4df136ef1aeb 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -391,7 +391,8 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	put_prev_task(rq, prev);
+	if (prev)
+		put_prev_task(rq, prev);
 	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
 
@@ -413,6 +414,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
+	if (cosched_is_idle(rq, prev))
+		fair_sched_class.put_prev_task(rq, prev);
 }
 
 /*
@@ -425,6 +428,8 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
  */
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
+	if (cosched_is_idle(rq, curr))
+		fair_sched_class.task_tick(rq, curr, queued);
 }
 
 static void set_curr_task_idle(struct rq *rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 48939c8e539d..f6146feb7e55 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1914,6 +1914,61 @@ extern const struct sched_class rt_sched_class;
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;
 
+#ifdef CONFIG_COSCHEDULING
+static inline bool cosched_is_idle(struct rq *rq, struct task_struct *p)
+{
+	if (!rq->sdrq_data.idle_se)
+		return false;
+	if (SCHED_WARN_ON(p != rq->idle))
+		return false;
+	return true;
+}
+
+static inline struct sched_entity *cosched_get_idle_se(struct rq *rq)
+{
+	return rq->sdrq_data.idle_se;
+}
+
+static inline struct sched_entity *cosched_get_and_clear_idle_se(struct rq *rq)
+{
+	struct sched_entity *se = rq->sdrq_data.idle_se;
+
+	rq->sdrq_data.idle_se = NULL;
+
+	return se;
+}
+
+static inline struct sched_entity *cosched_set_idle(struct rq *rq,
+						    struct sched_entity *se)
+{
+	rq->sdrq_data.idle_se = se;
+	return &idle_sched_class.pick_next_task(rq, NULL, NULL)->se;
+}
+#else /* !CONFIG_COSCHEDULING */
+static inline bool cosched_is_idle(struct rq *rq, struct task_struct *p)
+{
+	return false;
+}
+
+static inline struct sched_entity *cosched_get_idle_se(struct rq *rq)
+{
+	BUILD_BUG();
+	return NULL;
+}
+
+static inline struct sched_entity *cosched_get_and_clear_idle_se(struct rq *rq)
+{
+	BUILD_BUG();
+	return NULL;
+}
+
+static inline struct sched_entity *cosched_set_idle(struct rq *rq,
+						    struct sched_entity *se)
+{
+	BUILD_BUG();
+	return NULL;
+}
+#endif /* !CONFIG_COSCHEDULING */
 
 #ifdef CONFIG_SMP
 
-- 
2.9.3.1.gcba166c.dirty


Thread overview: 114+ messages
2018-09-07 21:39 [RFC 00/60] Coscheduling for Linux Jan H. Schönherr
2018-09-07 21:39 ` [RFC 01/60] sched: Store task_group->se[] pointers as part of cfs_rq Jan H. Schönherr
2018-09-07 21:39 ` [RFC 02/60] sched: Introduce set_entity_cfs() to place a SE into a certain CFS runqueue Jan H. Schönherr
2018-09-07 21:39 ` [RFC 03/60] sched: Setup sched_domain_shared for all sched_domains Jan H. Schönherr
2018-09-07 21:39 ` [RFC 04/60] sched: Replace sd_numa_mask() hack with something sane Jan H. Schönherr
2018-09-07 21:39 ` [RFC 05/60] sched: Allow to retrieve the sched_domain_topology Jan H. Schönherr
2018-09-07 21:39 ` [RFC 06/60] sched: Add a lock-free variant of resched_cpu() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 07/60] sched: Reduce dependencies of init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 08/60] sched: Move init_entity_runnable_average() into init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 09/60] sched: Do not require a CFS in init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 10/60] sched: Use parent_entity() in more places Jan H. Schönherr
2018-09-07 21:39 ` [RFC 11/60] locking/lockdep: Increase number of supported lockdep subclasses Jan H. Schönherr
2018-09-07 21:39 ` [RFC 12/60] locking/lockdep: Make cookie generator accessible Jan H. Schönherr
2018-09-07 21:40 ` [RFC 13/60] sched: Remove useless checks for root task-group Jan H. Schönherr
2018-09-07 21:40 ` [RFC 14/60] sched: Refactor sync_throttle() to accept a CFS runqueue as argument Jan H. Schönherr
2018-09-07 21:40 ` [RFC 15/60] sched: Introduce parent_cfs_rq() and use it Jan H. Schönherr
2018-09-07 21:40 ` [RFC 16/60] sched: Preparatory code movement Jan H. Schönherr
2018-09-07 21:40 ` [RFC 17/60] sched: Introduce and use generic task group CFS traversal functions Jan H. Schönherr
2018-09-07 21:40 ` [RFC 18/60] sched: Fix return value of SCHED_WARN_ON() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 19/60] sched: Add entity variants of enqueue_task_fair() and dequeue_task_fair() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 20/60] sched: Let {en,de}queue_entity_fair() work with a varying amount of tasks Jan H. Schönherr
2018-09-07 21:40 ` [RFC 21/60] sched: Add entity variants of put_prev_task_fair() and set_curr_task_fair() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 22/60] cosched: Add config option for coscheduling support Jan H. Schönherr
2018-09-07 21:40 ` [RFC 23/60] cosched: Add core data structures for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 24/60] cosched: Do minimal pre-SMP coscheduler initialization Jan H. Schönherr
2018-09-07 21:40 ` [RFC 25/60] cosched: Prepare scheduling domain topology for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 26/60] cosched: Construct runqueue hierarchy Jan H. Schönherr
2018-09-07 21:40 ` [RFC 27/60] cosched: Add some small helper functions for later use Jan H. Schönherr
2018-09-07 21:40 ` [RFC 28/60] cosched: Add is_sd_se() to distinguish SD-SEs from TG-SEs Jan H. Schönherr
2018-09-07 21:40 ` [RFC 29/60] cosched: Adjust code reflecting on the total number of CFS tasks on a CPU Jan H. Schönherr
2018-09-07 21:40 ` [RFC 30/60] cosched: Disallow share modification on task groups for now Jan H. Schönherr
2018-09-07 21:40 ` [RFC 31/60] cosched: Don't disable idle tick for now Jan H. Schönherr
2018-09-07 21:40 ` [RFC 32/60] cosched: Specialize parent_cfs_rq() for hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 33/60] cosched: Allow resched_curr() to be called for hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 34/60] cosched: Add rq_of() variants for different use cases Jan H. Schönherr
2018-09-07 21:40 ` [RFC 35/60] cosched: Adjust rq_lock() functions to work with hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 36/60] cosched: Use hrq_of() for rq_clock() and rq_clock_task() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 37/60] cosched: Use hrq_of() for (indirect calls to) ___update_load_sum() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 38/60] cosched: Skip updates on non-CPU runqueues in cfs_rq_util_change() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 39/60] cosched: Adjust task group management for hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 40/60] cosched: Keep track of task group hierarchy within each SD-RQ Jan H. Schönherr
2018-09-07 21:40 ` [RFC 41/60] cosched: Introduce locking for leader activities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 42/60] cosched: Introduce locking for (mostly) enqueuing and dequeuing Jan H. Schönherr
2018-09-07 21:40 ` [RFC 43/60] cosched: Add for_each_sched_entity() variant for owned entities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 44/60] cosched: Perform various rq_of() adjustments in scheduler code Jan H. Schönherr
2018-09-07 21:40 ` [RFC 45/60] cosched: Continue to account all load on per-CPU runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 46/60] cosched: Warn on throttling attempts of non-CPU runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 47/60] cosched: Adjust SE traversal and locking for common leader activities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 48/60] cosched: Adjust SE traversal and locking for yielding and buddies Jan H. Schönherr
2018-09-07 21:40 ` [RFC 49/60] cosched: Adjust locking for enqueuing and dequeueing Jan H. Schönherr
2018-09-07 21:40 ` [RFC 50/60] cosched: Propagate load changes across hierarchy levels Jan H. Schönherr
2018-09-07 21:40 ` [RFC 51/60] cosched: Hacky work-around to avoid observing zero weight SD-SE Jan H. Schönherr
2018-09-07 21:40 ` [RFC 52/60] cosched: Support SD-SEs in enqueuing and dequeuing Jan H. Schönherr
2018-09-07 21:40 ` [RFC 53/60] cosched: Prevent balancing related functions from crossing hierarchy levels Jan H. Schönherr
2018-09-07 21:40 ` [RFC 54/60] cosched: Support idling in a coscheduled set Jan H. Schönherr [this message]
2018-09-07 21:40 ` [RFC 55/60] cosched: Adjust task selection for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 56/60] cosched: Adjust wakeup preemption rules for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 57/60] cosched: Add sysfs interface to configure coscheduling on cgroups Jan H. Schönherr
2018-09-07 21:40 ` [RFC 58/60] cosched: Switch runqueues between regular scheduling and coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 59/60] cosched: Handle non-atomicity during switches to and from coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 60/60] cosched: Add command line argument to enable coscheduling Jan H. Schönherr
2018-09-10  2:50   ` Randy Dunlap
2018-09-12  0:24 ` [RFC 00/60] Coscheduling for Linux Nishanth Aravamudan
2018-09-12 19:34   ` Jan H. Schönherr
2018-09-12 23:15     ` Nishanth Aravamudan
2018-09-13 11:31       ` Jan H. Schönherr
2018-09-13 18:16         ` Nishanth Aravamudan
2018-09-12 23:18     ` Jan H. Schönherr
2018-09-13  3:05       ` Nishanth Aravamudan
2018-09-13 19:19 ` [RFC 61/60] cosched: Accumulated fixes and improvements Jan H. Schönherr
2018-09-26 17:25   ` Nishanth Aravamudan
2018-09-26 21:05     ` Nishanth Aravamudan
2018-10-01  9:13       ` Jan H. Schönherr
2018-09-14 11:12 ` [RFC 00/60] Coscheduling for Linux Peter Zijlstra
2018-09-14 16:25   ` Jan H. Schönherr
2018-09-15  8:48     ` Task group cleanups and optimizations (was: Re: [RFC 00/60] Coscheduling for Linux) Jan H. Schönherr
2018-09-17  9:48       ` Peter Zijlstra
2018-09-18 13:22         ` Jan H. Schönherr
2018-09-18 13:38           ` Peter Zijlstra
2018-09-18 13:54             ` Jan H. Schönherr
2018-09-18 13:42           ` Peter Zijlstra
2018-09-18 14:35           ` Rik van Riel
2018-09-19  9:23             ` Jan H. Schönherr
2018-11-23 16:51           ` Frederic Weisbecker
2018-12-04 13:23             ` Jan H. Schönherr
2018-09-17 11:33     ` [RFC 00/60] Coscheduling for Linux Peter Zijlstra
2018-11-02 22:13       ` Nishanth Aravamudan
2018-09-17 12:25     ` Peter Zijlstra
2018-09-26  9:58       ` Jan H. Schönherr
2018-09-27 18:36         ` Subhra Mazumdar
2018-11-23 16:29           ` Frederic Weisbecker
2018-09-17 13:37     ` Peter Zijlstra
2018-09-26  9:35       ` Jan H. Schönherr
2018-09-18 14:40     ` Rik van Riel
2018-09-24 15:23       ` Jan H. Schönherr
2018-09-24 18:01         ` Rik van Riel
2018-09-18  0:33 ` Subhra Mazumdar
2018-09-18 11:44   ` Jan H. Schönherr
2018-09-19 21:53     ` Subhra Mazumdar
2018-09-24 15:43       ` Jan H. Schönherr
2018-09-27 18:12         ` Subhra Mazumdar
2018-10-04 13:29 ` Jon Masters
2018-10-17  2:09 ` Frederic Weisbecker
2018-10-19 11:40   ` Jan H. Schönherr
2018-10-19 14:52     ` Frederic Weisbecker
2018-10-19 15:16     ` Rik van Riel
2018-10-19 15:33       ` Frederic Weisbecker
2018-10-19 15:45         ` Rik van Riel
2018-10-19 19:07           ` Jan H. Schönherr
2018-10-19  0:26 ` Subhra Mazumdar
2018-10-26 23:44   ` Jan H. Schönherr
2018-10-29 22:52     ` Subhra Mazumdar
2018-10-26 23:05 ` Subhra Mazumdar
2018-10-27  0:07   ` Jan H. Schönherr
