From: Peter Zijlstra <peterz@infradead.org>
To: mingo@kernel.org, juri.lelli@redhat.com
Cc: linux-kernel@vger.kernel.org, dietmar.eggemann@arm.com,
	luca.abeni@santannapisa.it, bristot@redhat.com,
	balsini@android.com, dvyukov@google.com, tglx@linutronix.de,
	vpillai@digitalocean.com, rostedt@goodmis.org,
	peterz@infradead.org
Subject: [RFC][PATCH 09/13] sched: Unify runtime accounting across classes
Date: Fri, 26 Jul 2019 16:54:18 +0200	[thread overview]
Message-ID: <20190726161357.883133230@infradead.org> (raw)
In-Reply-To: <20190726145409.947503076@infradead.org>

All scheduling classes use sched_entity::exec_start to track runtime
and each carries a near-identical copy of the code that computes it.

Collapse all that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
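For context, every class open-coded essentially the same sequence: read
the task clock, compute the delta against se->exec_start, bail out on a
non-positive delta, then update exec_max, sum_exec_runtime, exec_start
and the group/cgroup accounting.  The diff below funnels that through
update_curr_se()/update_curr_common() in fair.c.  As a rough standalone
sketch of the shared helper (simplified stand-in types and a
caller-supplied "now", not the kernel API; the real code is in the
fair.c hunk below):

#include <stdint.h>
#include <stdio.h>

struct sched_entity {
	uint64_t exec_start;		/* when the entity started running */
	uint64_t sum_exec_runtime;	/* total runtime consumed */
	int64_t  exec_max;		/* longest single slice observed */
};

/* one shared helper instead of a copy per scheduling class */
static int64_t update_curr_se(struct sched_entity *se, uint64_t now)
{
	int64_t delta_exec = (int64_t)(now - se->exec_start);

	if (delta_exec <= 0)		/* clock slipped or no progress */
		return delta_exec;

	se->exec_start = now;
	se->sum_exec_runtime += delta_exec;
	if (delta_exec > se->exec_max)
		se->exec_max = delta_exec;

	return delta_exec;
}

int main(void)
{
	struct sched_entity se = { .exec_start = 1000 };

	/* each "class" now just consumes the returned delta */
	printf("%lld\n", (long long)update_curr_se(&se, 1250)); /* 250 */
	printf("%lld\n", (long long)update_curr_se(&se, 1200)); /* -50, ignored */
	return 0;
}

The signed return value is also why exec_max and the per-class
delta_exec variables move from u64 to s64 in the hunks below: max()
wants matching types, and callers can test delta_exec <= 0 without a
cast.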
 include/linux/sched.h    |    2 -
 kernel/sched/deadline.c  |   17 ++-------------
 kernel/sched/fair.c      |   50 ++++++++++++++++++++++++++++++++++++-----------
 kernel/sched/rt.c        |   17 ++-------------
 kernel/sched/sched.h     |    2 +
 kernel/sched/stop_task.c |   16 ---------------
 6 files changed, 49 insertions(+), 55 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -433,7 +433,7 @@ struct sched_statistics {
 
 	u64				block_start;
 	u64				block_max;
-	u64				exec_max;
+	s64				exec_max;
 	u64				slice_max;
 
 	u64				nr_migrations_cold;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1147,9 +1147,8 @@ static void update_curr_dl(struct rq *rq
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_dl_entity *dl_se = &curr->dl;
-	u64 delta_exec, scaled_delta_exec;
+	s64 delta_exec, scaled_delta_exec;
 	int cpu = cpu_of(rq);
-	u64 now;
 
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
@@ -1162,23 +1161,13 @@ static void update_curr_dl(struct rq *rq
 	 * natural solution, but the full ramifications of this
 	 * approach need further study.
 	 */
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0)) {
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0)) {
 		if (unlikely(dl_se->dl_yielded))
 			goto throttle;
 		return;
 	}
 
-	schedstat_set(curr->se.statistics.exec_max,
-		      max(curr->se.statistics.exec_max, delta_exec));
-
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
-
 	if (dl_entity_is_special(dl_se))
 		return;
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -828,30 +828,58 @@ static void update_tg_load_avg(struct cf
 }
 #endif /* CONFIG_SMP */
 
+static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
+{
+	u64 now = rq_clock_task(rq);
+	s64 delta_exec;
+
+	delta_exec = now - curr->exec_start;
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
+
+	curr->exec_start = now;
+	curr->sum_exec_runtime += delta_exec;
+
+	schedstat_set(curr->statistics.exec_max,
+		      max(delta_exec, curr->statistics.exec_max));
+
+	return delta_exec;
+}
+
+/*
+ * Used by other classes to account runtime.
+ */
+s64 update_curr_common(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	s64 delta_exec;
+
+	delta_exec = update_curr_se(rq, &curr->se);
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
+
+	account_group_exec_runtime(curr, delta_exec);
+	cgroup_account_cputime(curr, delta_exec);
+
+	return delta_exec;
+}
+
 /*
  * Update the current task's runtime statistics.
  */
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
-	u64 now = rq_clock_task(rq_of(cfs_rq));
-	u64 delta_exec;
+	s64 delta_exec;
 
 	if (unlikely(!curr))
 		return;
 
-	delta_exec = now - curr->exec_start;
-	if (unlikely((s64)delta_exec <= 0))
+	delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+	if (unlikely(delta_exec <= 0))
 		return;
 
-	curr->exec_start = now;
-
-	schedstat_set(curr->statistics.exec_max,
-		      max(delta_exec, curr->statistics.exec_max));
-
-	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq->exec_clock, delta_exec);
-
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_min_vruntime(cfs_rq);
 
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -955,26 +955,15 @@ static void update_curr_rt(struct rq *rq
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_rt_entity *rt_se = &curr->rt;
-	u64 delta_exec;
-	u64 now;
+	s64 delta_exec;
 
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0))
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0))
 		return;
 
-	schedstat_set(curr->se.statistics.exec_max,
-		      max(curr->se.statistics.exec_max, delta_exec));
-
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
-
 	if (!rt_bandwidth_enabled())
 		return;
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1689,6 +1689,8 @@ extern const u32		sched_prio_to_wmult[40
 
 #define RETRY_TASK		((void *)-1UL)
 
+extern s64 update_curr_common(struct rq *rq);
+
 struct sched_class {
 	const struct sched_class *next;
 
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -62,21 +62,7 @@ static void yield_task_stop(struct rq *r
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct task_struct *curr = rq->curr;
-	u64 delta_exec;
-
-	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
-	if (unlikely((s64)delta_exec < 0))
-		delta_exec = 0;
-
-	schedstat_set(curr->se.statistics.exec_max,
-			max(curr->se.statistics.exec_max, delta_exec));
-
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = rq_clock_task(rq);
-	cgroup_account_cputime(curr, delta_exec);
+	update_curr_common(rq);
 }
 
 /*


