All of lore.kernel.org
 help / color / mirror / Atom feed
From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Bharata B Rao <bharata@linux.vnet.ibm.com>,
	Dhaval Giani <dhaval.giani@gmail.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
	Srivatsa Vaddagiri <vatsa@in.ibm.com>,
	Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@elte.hu>, Pavel Emelyanov <xemul@openvz.org>,
	Nikhil Rao <ncrao@google.com>
Subject: [patch 08/15] sched: throttle cfs_rq entities which exceed their local runtime
Date: Tue, 03 May 2011 02:28:54 -0700	[thread overview]
Message-ID: <20110503092905.163081832@google.com> (raw)
In-Reply-To: 20110503092846.022272244@google.com

[-- Attachment #1: sched-bwc-throttle_entities.patch --]
[-- Type: text/plain, Size: 8090 bytes --]

In account_cfs_rq_runtime() (via update_curr()) we track consumption versus a
cfs_rq's locally assigned runtime and whether there is global runtime available
to provide a refill when it runs out.

In the case that there is no runtime remaining it's necessary to throttle so
that execution ceases until the subsequent period.  While it is at this
boundary that we detect (and signal for, via resched_task) that a throttle is
required, the actual operation is deferred until put_prev_entity().

At this point the cfs_rq is marked as throttled and not re-enqueued; this
avoids potential interactions with throttled runqueues in the event that we
are not immediately able to evict the running task.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
 kernel/sched.c      |    7 ++
 kernel/sched_fair.c |  131 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 133 insertions(+), 5 deletions(-)

Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -985,6 +985,8 @@ place_entity(struct cfs_rq *cfs_rq, stru
 	se->vruntime = vruntime;
 }
 
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -1014,8 +1016,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_running == 1) {
 		list_add_leaf_cfs_rq(cfs_rq);
+		check_enqueue_throttle(cfs_rq);
+	}
 
 	start_cfs_bandwidth(cfs_rq);
 }
@@ -1221,6 +1225,8 @@ static struct sched_entity *pick_next_en
 	return se;
 }
 
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
 	/*
@@ -1230,6 +1236,9 @@ static void put_prev_entity(struct cfs_r
 	if (prev->on_rq)
 		update_curr(cfs_rq);
 
+	/* throttle cfs_rqs exceeding runtime */
+	check_cfs_rq_runtime(cfs_rq);
+
 	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
@@ -1295,7 +1304,7 @@ static inline u64 sched_cfs_bandwidth_sl
 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
 }
 
-static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
 	struct task_group *tg = cfs_rq->tg;
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
@@ -1317,6 +1326,8 @@ static void assign_cfs_rq_runtime(struct
 
 	cfs_rq->runtime_remaining += amount;
 	cfs_rq->runtime_expires = max(cfs_rq->runtime_expires, expires);
+
+	return cfs_rq->runtime_remaining > 0;
 }
 
 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -1359,7 +1370,90 @@ static void account_cfs_rq_runtime(struc
 	if (cfs_rq->runtime_remaining > 0)
 		return;
 
-	assign_cfs_rq_runtime(cfs_rq);
+	/*
+	 * if we're unable to extend our runtime we resched so that the active
+	 * hierarchy can be throttled
+	 */
+	if (!assign_cfs_rq_runtime(cfs_rq))
+		resched_task(rq_of(cfs_rq)->curr);
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->throttled;
+}
+
+static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	struct rq *rq = rq_of(cfs_rq);
+	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+	struct sched_entity *se;
+	long task_delta, dequeue = 1;
+
+	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
+
+	/* account load preceding throttle */
+	update_cfs_load(cfs_rq, 0);
+
+	task_delta = -cfs_rq->h_nr_running;
+	for_each_sched_entity(se) {
+		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+		/* throttled entity or throttle-on-deactivate */
+		if (!se->on_rq)
+			break;
+
+		if (dequeue)
+			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
+		qcfs_rq->h_nr_running += task_delta;
+
+		if (qcfs_rq->load.weight)
+			dequeue = 0;
+	}
+
+	if (!se)
+		rq->nr_running += task_delta;
+
+	cfs_rq->throttled = 1;
+	raw_spin_lock(&cfs_b->lock);
+	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	raw_spin_unlock(&cfs_b->lock);
+}
+
+/* conditionally throttle active cfs_rq's from put_prev_entity() */
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	if (!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)
+		return;
+
+	/*
+	 * it's possible active load balance has forced a throttled cfs_rq to
+	 * run again, we don't want to re-throttled in this case.
+	 */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	throttle_cfs_rq(cfs_rq);
+}
+
+/*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired, otherwise it may be allowed to steal additional ticks of runtime
+ * since update_curr() throttling can not not trigger until it's on-rq.
+ */
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+{
+	/* an active group must be handled by the update_curr()->put() path */
+	if (cfs_rq->curr || !cfs_rq->runtime_enabled)
+		return;
+
+	/* ensure the group is not already throttled */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	/* update runtime allocation */
+	account_cfs_rq_runtime(cfs_rq, 0);
+	if (cfs_rq->runtime_remaining <= 0)
+		throttle_cfs_rq(cfs_rq);
 }
 
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
@@ -1389,6 +1483,14 @@ static int do_sched_cfs_period_timer(str
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		unsigned long delta_exec) {}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return 0;
+}
+
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 #endif
 
 /**************************************************
@@ -1468,6 +1570,12 @@ enqueue_task_fair(struct rq *rq, struct 
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, flags);
 		cfs_rq->h_nr_running++;
+
+		/* end evaluation on throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq)) {
+			se = NULL;
+			break;
+		}
 		flags = ENQUEUE_WAKEUP;
 	}
 
@@ -1475,11 +1583,15 @@ enqueue_task_fair(struct rq *rq, struct 
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
 
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
-	inc_nr_running(rq);
+	if (!se)
+		inc_nr_running(rq);
 	hrtick_update(rq);
 }
 
@@ -1498,6 +1610,11 @@ static void dequeue_task_fair(struct rq 
 		dequeue_entity(cfs_rq, se, flags);
 		cfs_rq->h_nr_running--;
 
+		/* end evaluation on throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq)) {
+			se = NULL;
+			break;
+		}
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
 			se = parent_entity(se);
@@ -1510,11 +1627,15 @@ static void dequeue_task_fair(struct rq 
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
 
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
-	dec_nr_running(rq);
+	if (!se)
+		dec_nr_running(rq);
 	hrtick_update(rq);
 }
 
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -258,6 +258,8 @@ struct cfs_bandwidth {
 
 	int idle;
 	struct hrtimer period_timer;
+	struct list_head throttled_cfs_rq;
+
 #endif
 };
 
@@ -392,6 +394,9 @@ struct cfs_rq {
 	int runtime_enabled;
 	u64 runtime_expires;
 	s64 runtime_remaining;
+
+	int throttled;
+	struct list_head throttled_list;
 #endif
 #endif
 };
@@ -433,6 +438,7 @@ static void init_cfs_bandwidth(struct cf
 	cfs_b->quota = RUNTIME_INF;
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 
+	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 
@@ -442,6 +448,7 @@ static void init_cfs_rq_runtime(struct c
 {
 	cfs_rq->runtime_remaining = 0;
 	cfs_rq->runtime_enabled = 0;
+	INIT_LIST_HEAD(&cfs_rq->throttled_list);
 }
 
 static void start_cfs_bandwidth(struct cfs_rq *cfs_rq)



  parent reply	other threads:[~2011-05-07  6:33 UTC|newest]

Thread overview: 129+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-05-03  9:28 [patch 00/15] CFS Bandwidth Control V6 Paul Turner
2011-05-03  9:28 ` [patch 01/15] sched: (fixlet) dont update shares twice on on_rq parent Paul Turner
2011-05-10  7:14   ` Hidetoshi Seto
2011-05-10  8:32     ` Mike Galbraith
2011-05-11  7:55       ` Hidetoshi Seto
2011-05-11  8:13         ` Paul Turner
2011-05-11  8:45           ` Mike Galbraith
2011-05-11  8:59             ` Hidetoshi Seto
2011-05-03  9:28 ` [patch 02/15] sched: hierarchical task accounting for SCHED_OTHER Paul Turner
2011-05-10  7:17   ` Hidetoshi Seto
2011-05-03  9:28 ` [patch 03/15] sched: introduce primitives to account for CFS bandwidth tracking Paul Turner
2011-05-10  7:18   ` Hidetoshi Seto
2011-05-03  9:28 ` [patch 04/15] sched: validate CFS quota hierarchies Paul Turner
2011-05-10  7:20   ` Hidetoshi Seto
2011-05-11  9:37     ` Paul Turner
2011-05-16  9:30   ` Peter Zijlstra
2011-05-16  9:43   ` Peter Zijlstra
2011-05-16 12:32     ` Paul Turner
2011-05-17 15:26       ` Peter Zijlstra
2011-05-18  7:16         ` Paul Turner
2011-05-18 11:57           ` Peter Zijlstra
2011-05-03  9:28 ` [patch 05/15] sched: add a timer to handle CFS bandwidth refresh Paul Turner
2011-05-10  7:21   ` Hidetoshi Seto
2011-05-11  9:27     ` Paul Turner
2011-05-16 10:18   ` Peter Zijlstra
2011-05-16 12:56     ` Paul Turner
2011-05-03  9:28 ` [patch 06/15] sched: accumulate per-cfs_rq cpu usage and charge against bandwidth Paul Turner
2011-05-10  7:22   ` Hidetoshi Seto
2011-05-11  9:25     ` Paul Turner
2011-05-16 10:27   ` Peter Zijlstra
2011-05-16 12:59     ` Paul Turner
2011-05-17 15:28       ` Peter Zijlstra
2011-05-18  7:02         ` Paul Turner
2011-05-16 10:32   ` Peter Zijlstra
2011-05-03  9:28 ` [patch 07/15] sched: expire invalid runtime Paul Turner
2011-05-10  7:22   ` Hidetoshi Seto
2011-05-16 11:05   ` Peter Zijlstra
2011-05-16 11:07   ` Peter Zijlstra
2011-05-03  9:28 ` Paul Turner [this message]
2011-05-10  7:23   ` [patch 08/15] sched: throttle cfs_rq entities which exceed their local runtime Hidetoshi Seto
2011-05-16 15:58   ` Peter Zijlstra
2011-05-16 16:05   ` Peter Zijlstra
2011-05-03  9:28 ` [patch 09/15] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh Paul Turner
2011-05-10  7:24   ` Hidetoshi Seto
2011-05-11  9:24     ` Paul Turner
2011-05-03  9:28 ` [patch 10/15] sched: allow for positional tg_tree walks Paul Turner
2011-05-10  7:24   ` Hidetoshi Seto
2011-05-17 13:31   ` Peter Zijlstra
2011-05-18  7:18     ` Paul Turner
2011-05-03  9:28 ` [patch 11/15] sched: prevent interactions between throttled entities and load-balance Paul Turner
2011-05-10  7:26   ` Hidetoshi Seto
2011-05-11  9:11     ` Paul Turner
2011-05-03  9:28 ` [patch 12/15] sched: migrate throttled tasks on HOTPLUG Paul Turner
2011-05-10  7:27   ` Hidetoshi Seto
2011-05-11  9:10     ` Paul Turner
2011-05-03  9:28 ` [patch 13/15] sched: add exports tracking cfs bandwidth control statistics Paul Turner
2011-05-10  7:27   ` Hidetoshi Seto
2011-05-11  7:56   ` Hidetoshi Seto
2011-05-11  9:09     ` Paul Turner
2011-05-03  9:29 ` [patch 14/15] sched: return unused runtime on voluntary sleep Paul Turner
2011-05-10  7:28   ` Hidetoshi Seto
2011-05-03  9:29 ` [patch 15/15] sched: add documentation for bandwidth control Paul Turner
2011-05-10  7:29   ` Hidetoshi Seto
2011-05-11  9:09     ` Paul Turner
2011-06-07 15:45 ` CFS Bandwidth Control - Test results of cgroups tasks pinned vs unpinned Kamalesh Babulal
2011-06-08  3:09   ` Paul Turner
2011-06-08 10:46   ` Vladimir Davydov
2011-06-08 16:32     ` Kamalesh Babulal
2011-06-09  3:25       ` Paul Turner
2011-06-10 18:17         ` Kamalesh Babulal
2011-06-14  0:00           ` Paul Turner
2011-06-15  5:37             ` Kamalesh Babulal
2011-06-21 19:48               ` Paul Turner
2011-06-24 15:05                 ` Kamalesh Babulal
2011-09-07 11:00                 ` Srivatsa Vaddagiri
2011-09-07 14:54                 ` Srivatsa Vaddagiri
2011-09-07 15:20                 ` CFS Bandwidth Control - Test results of cgroups tasks pinned vs unpinnede Srivatsa Vaddagiri
2011-09-07 19:22                   ` Peter Zijlstra
2011-09-08 15:15                     ` Srivatsa Vaddagiri
2011-09-09 12:31                       ` Peter Zijlstra
2011-09-09 13:26                         ` Srivatsa Vaddagiri
2011-09-12 10:17                         ` Srivatsa Vaddagiri
2011-09-12 12:35                           ` Peter Zijlstra
2011-09-13  4:15                             ` Srivatsa Vaddagiri
2011-09-13  5:03                               ` Srivatsa Vaddagiri
2011-09-13  5:05                                 ` Srivatsa Vaddagiri
2011-09-13  9:39                                 ` Peter Zijlstra
2011-09-13 11:28                                   ` Srivatsa Vaddagiri
2011-09-13 14:07                                     ` Peter Zijlstra
2011-09-13 16:21                                       ` Srivatsa Vaddagiri
2011-09-13 16:33                                         ` Peter Zijlstra
2011-09-13 17:41                                           ` Srivatsa Vaddagiri
2011-09-13 16:36                                         ` Peter Zijlstra
2011-09-13 17:54                                           ` Srivatsa Vaddagiri
2011-09-13 18:03                                             ` Peter Zijlstra
2011-09-13 18:12                                               ` Srivatsa Vaddagiri
2011-09-13 18:07                                             ` Peter Zijlstra
2011-09-13 18:19                                             ` Peter Zijlstra
2011-09-13 18:28                                               ` Srivatsa Vaddagiri
2011-09-13 18:30                                                 ` Peter Zijlstra
2011-09-13 18:35                                                   ` Srivatsa Vaddagiri
2011-09-15 17:55                                                     ` Kamalesh Babulal
2011-09-15 21:48                                                       ` Peter Zijlstra
2011-09-19 17:51                                                         ` Kamalesh Babulal
2011-09-20  0:38                                                           ` Venki Pallipadi
2011-09-20 11:09                                                             ` Kamalesh Babulal
2011-09-20 13:56                                                           ` Peter Zijlstra
2011-09-20 14:04                                                           ` Peter Zijlstra
2011-09-20 12:55                                                       ` Peter Zijlstra
2011-09-21 17:34                                                         ` Kamalesh Babulal
2011-09-13 14:19                               ` Peter Zijlstra
2011-09-13 18:01                                 ` Srivatsa Vaddagiri
2011-09-13 18:23                                   ` Peter Zijlstra
2011-09-16  8:14                                     ` Paul Turner
2011-09-16  8:28                                       ` Peter Zijlstra
2011-09-19 16:35                                         ` Srivatsa Vaddagiri
2011-09-16  8:22                   ` Paul Turner
2011-06-14 10:16   ` CFS Bandwidth Control - Test results of cgroups tasks pinned vs unpinned Hidetoshi Seto
2011-06-14  6:58 ` [patch 00/15] CFS Bandwidth Control V6 Hu Tao
2011-06-14  7:29   ` Hidetoshi Seto
2011-06-14  7:44     ` Hu Tao
2011-06-15  8:37     ` Hu Tao
2011-06-16  0:57       ` Hidetoshi Seto
2011-06-16  9:45         ` Hu Tao
2011-06-17  1:22           ` Hidetoshi Seto
2011-06-17  6:05             ` Hu Tao
2011-06-17  6:25             ` Paul Turner
2011-06-17  9:13               ` Hidetoshi Seto
2011-06-18  0:28                 ` Paul Turner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20110503092905.163081832@google.com \
    --to=pjt@google.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=balbir@linux.vnet.ibm.com \
    --cc=bharata@linux.vnet.ibm.com \
    --cc=dhaval.giani@gmail.com \
    --cc=kamalesh@linux.vnet.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=ncrao@google.com \
    --cc=svaidy@linux.vnet.ibm.com \
    --cc=vatsa@in.ibm.com \
    --cc=xemul@openvz.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.