linux-kernel.vger.kernel.org archive mirror
From: "Jan H. Schönherr" <jschoenh@amazon.de>
To: Ingo Molnar <mingo@redhat.com>, Peter Zijlstra <peterz@infradead.org>
Cc: "Jan H. Schönherr" <jschoenh@amazon.de>, linux-kernel@vger.kernel.org
Subject: [RFC 50/60] cosched: Propagate load changes across hierarchy levels
Date: Fri,  7 Sep 2018 23:40:37 +0200	[thread overview]
Message-ID: <20180907214047.26914-51-jschoenh@amazon.de> (raw)
In-Reply-To: <20180907214047.26914-1-jschoenh@amazon.de>

The weight of an SD-SE is defined to be the average weight of all
runqueues that are represented by the SD-SE. Hence, its weight
should change whenever one of the child runqueues changes its
weight. However, as these are two different hierarchy levels,
they are protected by different locks. To reduce lock contention,
we want to avoid holding higher-level locks for prolonged periods
of time where possible.
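
For illustration (assuming a homogeneous topology, which is all the
current code handles, per the FIXME below): if an SD-SE represents
four CPU runqueues carrying weights 1024, 2048, 1024, and 0, its
weight should come out as

	(1024 + 2048 + 1024 + 0) / 4 = 1024

that is, the aggregated load divided by the number of represented
runqueues. In update_sdse_load() the same calculation appears as
sdse_load scaled by the span_weight of the first child relative to
the sdrq's own span_weight.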

Therefore, we update an aggregated weight -- sdrq->sdse_load --
in a lock-free manner during enqueue and dequeue at the lower level,
and once we actually hold the higher-level lock, we perform the
SD-SE weight adjustment via update_sdse_load().
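
In essence, the split looks like this (a simplified sketch, not part
of the patch; the function names are made up for illustration, and the
is_root/throttled checks as well as the span-based scaling done by
update_sdse_load() are left out):

	/* Lower level: runs under the lower-level runqueue lock only. */
	static void sdse_load_enqueue_sketch(struct cfs_rq *cfs_rq,
					     struct sched_entity *se)
	{
		atomic64_add(se->load.weight,
			     &cfs_rq->sdrq.sd_parent->sdse_load);
	}

	/* Higher level: applied once the higher-level lock is taken. */
	static void sdse_load_apply_sketch(struct sched_entity *sd_se)
	{
		struct cfs_rq *cfs_rq = cfs_rq_of(sd_se);
		unsigned long load;

		load = atomic64_read(&cfs_rq->sdrq.sdse_load);
		reweight_entity(cfs_rq, sd_se, load, load);
	}

The dequeue side mirrors the enqueue side with atomic64_sub().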

At some point in the future (the code is not there yet), this will
also allow software combining, so that not every CPU has to walk up
the full hierarchy on enqueue/dequeue.

Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
---
 kernel/sched/fair.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0dc4d289497c..1eee262ecf88 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2740,6 +2740,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+#ifdef CONFIG_COSCHEDULING
+	if (!cfs_rq->sdrq.is_root && !cfs_rq->throttled)
+		atomic64_add(se->load.weight, &cfs_rq->sdrq.sd_parent->sdse_load);
+#endif
 	update_load_add(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se) || is_sd_se(parent_entity(se)))
 		update_load_add(&hrq_of(cfs_rq)->load, se->load.weight);
@@ -2757,6 +2761,10 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+#ifdef CONFIG_COSCHEDULING
+	if (!cfs_rq->sdrq.is_root && !cfs_rq->throttled)
+		atomic64_sub(se->load.weight, &cfs_rq->sdrq.sd_parent->sdse_load);
+#endif
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se) || is_sd_se(parent_entity(se)))
 		update_load_sub(&hrq_of(cfs_rq)->load, se->load.weight);
@@ -3083,6 +3091,35 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+#ifdef CONFIG_COSCHEDULING
+static void update_sdse_load(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct sdrq *sdrq = &cfs_rq->sdrq;
+	unsigned long load;
+
+	if (!is_sd_se(se))
+		return;
+
+	/* FIXME: the load calculation assumes a homogeneous topology */
+	load = atomic64_read(&sdrq->sdse_load);
+
+	if (!list_empty(&sdrq->children)) {
+		struct sdrq *entry;
+
+		entry = list_first_entry(&sdrq->children, struct sdrq, siblings);
+		load *= entry->data->span_weight;
+	}
+
+	load /= sdrq->data->span_weight;
+
+	/* FIXME: Use a proper runnable */
+	reweight_entity(cfs_rq, se, load, load);
+}
+#else /* !CONFIG_COSCHEDULING */
+static void update_sdse_load(struct sched_entity *se) { }
+#endif /* !CONFIG_COSCHEDULING */
+
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = hrq_of(cfs_rq);
@@ -4527,6 +4564,11 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	se = cfs_rq->my_se;
 
+#ifdef CONFIG_COSCHEDULING
+	if (!cfs_rq->sdrq.is_root && !cfs_rq->throttled)
+		atomic64_sub(cfs_rq->load.weight,
+			     &cfs_rq->sdrq.sd_parent->sdse_load);
+#endif
 	/* freeze hierarchy runnable averages while throttled */
 	rcu_read_lock();
 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
@@ -4538,6 +4580,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
+
 		/* throttled entity or throttle-on-deactivate */
 		if (!se->on_rq)
 			break;
@@ -4590,6 +4634,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	se = cfs_rq->my_se;
 
 	cfs_rq->throttled = 0;
+#ifdef CONFIG_COSCHEDULING
+	if (!cfs_rq->sdrq.is_root && !cfs_rq->throttled)
+		atomic64_add(cfs_rq->load.weight,
+			     &cfs_rq->sdrq.sd_parent->sdse_load);
+#endif
 
 	update_rq_clock(rq);
 
@@ -4608,6 +4657,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	rq_chain_init(&rc, rq);
 	for_each_sched_entity(se) {
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		if (se->on_rq)
 			enqueue = 0;
 
@@ -5152,6 +5202,7 @@ bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
 	rq_chain_init(&rc, rq);
 	for_each_sched_entity(se) {
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		if (se->on_rq)
 			break;
 		cfs_rq = cfs_rq_of(se);
@@ -5173,6 +5224,7 @@ bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
 	for_each_sched_entity(se) {
 		/* FIXME: taking locks up to the top is bad */
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running += task_delta;
 
@@ -5235,6 +5287,7 @@ bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
 	rq_chain_init(&rc, rq);
 	for_each_sched_entity(se) {
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
 
@@ -5269,6 +5322,7 @@ bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
 	for_each_sched_entity(se) {
 		/* FIXME: taking locks up to the top is bad */
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running -= task_delta;
 
@@ -9897,6 +9951,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
 
 	for_each_sched_entity(se) {
 		rq_chain_lock(&rc, se);
+		update_sdse_load(se);
 		cfs_rq = cfs_rq_of(se);
 
 		if (cfs_rq_throttled(cfs_rq))
-- 
2.9.3.1.gcba166c.dirty



Thread overview: 114+ messages
2018-09-07 21:39 [RFC 00/60] Coscheduling for Linux Jan H. Schönherr
2018-09-07 21:39 ` [RFC 01/60] sched: Store task_group->se[] pointers as part of cfs_rq Jan H. Schönherr
2018-09-07 21:39 ` [RFC 02/60] sched: Introduce set_entity_cfs() to place a SE into a certain CFS runqueue Jan H. Schönherr
2018-09-07 21:39 ` [RFC 03/60] sched: Setup sched_domain_shared for all sched_domains Jan H. Schönherr
2018-09-07 21:39 ` [RFC 04/60] sched: Replace sd_numa_mask() hack with something sane Jan H. Schönherr
2018-09-07 21:39 ` [RFC 05/60] sched: Allow to retrieve the sched_domain_topology Jan H. Schönherr
2018-09-07 21:39 ` [RFC 06/60] sched: Add a lock-free variant of resched_cpu() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 07/60] sched: Reduce dependencies of init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 08/60] sched: Move init_entity_runnable_average() into init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 09/60] sched: Do not require a CFS in init_tg_cfs_entry() Jan H. Schönherr
2018-09-07 21:39 ` [RFC 10/60] sched: Use parent_entity() in more places Jan H. Schönherr
2018-09-07 21:39 ` [RFC 11/60] locking/lockdep: Increase number of supported lockdep subclasses Jan H. Schönherr
2018-09-07 21:39 ` [RFC 12/60] locking/lockdep: Make cookie generator accessible Jan H. Schönherr
2018-09-07 21:40 ` [RFC 13/60] sched: Remove useless checks for root task-group Jan H. Schönherr
2018-09-07 21:40 ` [RFC 14/60] sched: Refactor sync_throttle() to accept a CFS runqueue as argument Jan H. Schönherr
2018-09-07 21:40 ` [RFC 15/60] sched: Introduce parent_cfs_rq() and use it Jan H. Schönherr
2018-09-07 21:40 ` [RFC 16/60] sched: Preparatory code movement Jan H. Schönherr
2018-09-07 21:40 ` [RFC 17/60] sched: Introduce and use generic task group CFS traversal functions Jan H. Schönherr
2018-09-07 21:40 ` [RFC 18/60] sched: Fix return value of SCHED_WARN_ON() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 19/60] sched: Add entity variants of enqueue_task_fair() and dequeue_task_fair() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 20/60] sched: Let {en,de}queue_entity_fair() work with a varying amount of tasks Jan H. Schönherr
2018-09-07 21:40 ` [RFC 21/60] sched: Add entity variants of put_prev_task_fair() and set_curr_task_fair() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 22/60] cosched: Add config option for coscheduling support Jan H. Schönherr
2018-09-07 21:40 ` [RFC 23/60] cosched: Add core data structures for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 24/60] cosched: Do minimal pre-SMP coscheduler initialization Jan H. Schönherr
2018-09-07 21:40 ` [RFC 25/60] cosched: Prepare scheduling domain topology for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 26/60] cosched: Construct runqueue hierarchy Jan H. Schönherr
2018-09-07 21:40 ` [RFC 27/60] cosched: Add some small helper functions for later use Jan H. Schönherr
2018-09-07 21:40 ` [RFC 28/60] cosched: Add is_sd_se() to distinguish SD-SEs from TG-SEs Jan H. Schönherr
2018-09-07 21:40 ` [RFC 29/60] cosched: Adjust code reflecting on the total number of CFS tasks on a CPU Jan H. Schönherr
2018-09-07 21:40 ` [RFC 30/60] cosched: Disallow share modification on task groups for now Jan H. Schönherr
2018-09-07 21:40 ` [RFC 31/60] cosched: Don't disable idle tick " Jan H. Schönherr
2018-09-07 21:40 ` [RFC 32/60] cosched: Specialize parent_cfs_rq() for hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 33/60] cosched: Allow resched_curr() to be called " Jan H. Schönherr
2018-09-07 21:40 ` [RFC 34/60] cosched: Add rq_of() variants for different use cases Jan H. Schönherr
2018-09-07 21:40 ` [RFC 35/60] cosched: Adjust rq_lock() functions to work with hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 36/60] cosched: Use hrq_of() for rq_clock() and rq_clock_task() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 37/60] cosched: Use hrq_of() for (indirect calls to) ___update_load_sum() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 38/60] cosched: Skip updates on non-CPU runqueues in cfs_rq_util_change() Jan H. Schönherr
2018-09-07 21:40 ` [RFC 39/60] cosched: Adjust task group management for hierarchical runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 40/60] cosched: Keep track of task group hierarchy within each SD-RQ Jan H. Schönherr
2018-09-07 21:40 ` [RFC 41/60] cosched: Introduce locking for leader activities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 42/60] cosched: Introduce locking for (mostly) enqueuing and dequeuing Jan H. Schönherr
2018-09-07 21:40 ` [RFC 43/60] cosched: Add for_each_sched_entity() variant for owned entities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 44/60] cosched: Perform various rq_of() adjustments in scheduler code Jan H. Schönherr
2018-09-07 21:40 ` [RFC 45/60] cosched: Continue to account all load on per-CPU runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 46/60] cosched: Warn on throttling attempts of non-CPU runqueues Jan H. Schönherr
2018-09-07 21:40 ` [RFC 47/60] cosched: Adjust SE traversal and locking for common leader activities Jan H. Schönherr
2018-09-07 21:40 ` [RFC 48/60] cosched: Adjust SE traversal and locking for yielding and buddies Jan H. Schönherr
2018-09-07 21:40 ` [RFC 49/60] cosched: Adjust locking for enqueuing and dequeueing Jan H. Schönherr
2018-09-07 21:40 ` Jan H. Schönherr [this message]
2018-09-07 21:40 ` [RFC 51/60] cosched: Hacky work-around to avoid observing zero weight SD-SE Jan H. Schönherr
2018-09-07 21:40 ` [RFC 52/60] cosched: Support SD-SEs in enqueuing and dequeuing Jan H. Schönherr
2018-09-07 21:40 ` [RFC 53/60] cosched: Prevent balancing related functions from crossing hierarchy levels Jan H. Schönherr
2018-09-07 21:40 ` [RFC 54/60] cosched: Support idling in a coscheduled set Jan H. Schönherr
2018-09-07 21:40 ` [RFC 55/60] cosched: Adjust task selection for coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 56/60] cosched: Adjust wakeup preemption rules " Jan H. Schönherr
2018-09-07 21:40 ` [RFC 57/60] cosched: Add sysfs interface to configure coscheduling on cgroups Jan H. Schönherr
2018-09-07 21:40 ` [RFC 58/60] cosched: Switch runqueues between regular scheduling and coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 59/60] cosched: Handle non-atomicity during switches to and from coscheduling Jan H. Schönherr
2018-09-07 21:40 ` [RFC 60/60] cosched: Add command line argument to enable coscheduling Jan H. Schönherr
2018-09-10  2:50   ` Randy Dunlap
2018-09-12  0:24 ` [RFC 00/60] Coscheduling for Linux Nishanth Aravamudan
2018-09-12 19:34   ` Jan H. Schönherr
2018-09-12 23:15     ` Nishanth Aravamudan
2018-09-13 11:31       ` Jan H. Schönherr
2018-09-13 18:16         ` Nishanth Aravamudan
2018-09-12 23:18     ` Jan H. Schönherr
2018-09-13  3:05       ` Nishanth Aravamudan
2018-09-13 19:19 ` [RFC 61/60] cosched: Accumulated fixes and improvements Jan H. Schönherr
2018-09-26 17:25   ` Nishanth Aravamudan
2018-09-26 21:05     ` Nishanth Aravamudan
2018-10-01  9:13       ` Jan H. Schönherr
2018-09-14 11:12 ` [RFC 00/60] Coscheduling for Linux Peter Zijlstra
2018-09-14 16:25   ` Jan H. Schönherr
2018-09-15  8:48     ` Task group cleanups and optimizations (was: Re: [RFC 00/60] Coscheduling for Linux) Jan H. Schönherr
2018-09-17  9:48       ` Peter Zijlstra
2018-09-18 13:22         ` Jan H. Schönherr
2018-09-18 13:38           ` Peter Zijlstra
2018-09-18 13:54             ` Jan H. Schönherr
2018-09-18 13:42           ` Peter Zijlstra
2018-09-18 14:35           ` Rik van Riel
2018-09-19  9:23             ` Jan H. Schönherr
2018-11-23 16:51           ` Frederic Weisbecker
2018-12-04 13:23             ` Jan H. Schönherr
2018-09-17 11:33     ` [RFC 00/60] Coscheduling for Linux Peter Zijlstra
2018-11-02 22:13       ` Nishanth Aravamudan
2018-09-17 12:25     ` Peter Zijlstra
2018-09-26  9:58       ` Jan H. Schönherr
2018-09-27 18:36         ` Subhra Mazumdar
2018-11-23 16:29           ` Frederic Weisbecker
2018-09-17 13:37     ` Peter Zijlstra
2018-09-26  9:35       ` Jan H. Schönherr
2018-09-18 14:40     ` Rik van Riel
2018-09-24 15:23       ` Jan H. Schönherr
2018-09-24 18:01         ` Rik van Riel
2018-09-18  0:33 ` Subhra Mazumdar
2018-09-18 11:44   ` Jan H. Schönherr
2018-09-19 21:53     ` Subhra Mazumdar
2018-09-24 15:43       ` Jan H. Schönherr
2018-09-27 18:12         ` Subhra Mazumdar
2018-10-04 13:29 ` Jon Masters
2018-10-17  2:09 ` Frederic Weisbecker
2018-10-19 11:40   ` Jan H. Schönherr
2018-10-19 14:52     ` Frederic Weisbecker
2018-10-19 15:16     ` Rik van Riel
2018-10-19 15:33       ` Frederic Weisbecker
2018-10-19 15:45         ` Rik van Riel
2018-10-19 19:07           ` Jan H. Schönherr
2018-10-19  0:26 ` Subhra Mazumdar
2018-10-26 23:44   ` Jan H. Schönherr
2018-10-29 22:52     ` Subhra Mazumdar
2018-10-26 23:05 ` Subhra Mazumdar
2018-10-27  0:07   ` Jan H. Schönherr
