From: Frederic Weisbecker <fweisbec@gmail.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Ingo Molnar <mingo@kernel.org>, Mike Galbraith <efault@gmx.de>,
	Thomas Gleixner <tglx@linutronix.de>
Subject: [NOT-FOR-MERGE-PATCH 3/3] Debug: Alternate old/new version of sched_avg_update calls for profiling
Date: Tue, 14 Jun 2016 17:28:02 +0200	[thread overview]
Message-ID: <1465918082-27005-4-git-send-email-fweisbec@gmail.com> (raw)
In-Reply-To: <1465918082-27005-1-git-send-email-fweisbec@gmail.com>

Alternating between the old and the new version on each call lets both be measured with the function profiler (per-CPU stats below: hit count, total time, average, s^2):

	trace_stat/function0:  sched_avg_update_old                   764    223.936 us      0.293 us        0.017 us
	trace_stat/function0:  sched_avg_update                       764    199.818 us      0.261 us        0.006 us
	trace_stat/function1:  sched_avg_update_old                   252    99.225 us       0.393 us        0.106 us
	trace_stat/function1:  sched_avg_update                       252    59.080 us       0.234 us        0.008 us
	trace_stat/function2:  sched_avg_update_old                   606    202.465 us      0.334 us        0.007 us
	trace_stat/function2:  sched_avg_update                       605    184.083 us      0.304 us        0.002 us
	trace_stat/function3:  sched_avg_update_old                   902    261.790 us      0.290 us        0.010 us
	trace_stat/function3:  sched_avg_update                       901    238.253 us      0.264 us        0.004 us
	trace_stat/function4:  sched_avg_update_old                   120    50.391 us       0.419 us        0.095 us
	trace_stat/function4:  sched_avg_update                       120    35.947 us       0.299 us        0.004 us
	trace_stat/function5:  sched_avg_update_old                   581    195.280 us      0.336 us        0.011 us
	trace_stat/function5:  sched_avg_update                       582    173.594 us      0.298 us        0.003 us
	trace_stat/function6:  sched_avg_update_old                   202    45.201 us       0.223 us        0.049 us
	trace_stat/function6:  sched_avg_update                       201    37.892 us       0.188 us        0.013 us
	trace_stat/function7:  sched_avg_update_old                   200    80.731 us       0.403 us        0.477 us
	trace_stat/function7:  sched_avg_update                       200    41.759 us       0.208 us        0.012 us
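
For reference, here is a standalone userspace sketch of the arithmetic being
compared. The old version halves rt_avg once per elapsed period in a loop; the
unlooped version from patch 2/3 is assumed here to collapse that into a single
shift by the number of elapsed periods. The names, the clamp and the inputs
below are illustrative stand-ins for the rq fields, not the kernel code:

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* Stand-ins for rq->age_stamp, rq->rt_avg and rq_clock(rq). */
	static uint64_t age_stamp;
	static uint64_t rt_avg;

	/*
	 * Old behaviour: one halving of rt_avg per elapsed period.
	 * The kernel loop uses a signed compare and an asm barrier;
	 * both are omitted in this sketch.
	 */
	static void decay_looped(uint64_t now, uint64_t period)
	{
		while (now - age_stamp > period) {
			age_stamp += period;
			rt_avg /= 2;
		}
	}

	/* Assumed unlooped equivalent: a single shift by the period count. */
	static void decay_unlooped(uint64_t now, uint64_t period)
	{
		uint64_t delta = now - age_stamp;
		uint64_t periods;

		if (delta <= period)
			return;
		/* Same number of halvings as the loop above would perform. */
		periods = (delta - 1) / period;
		age_stamp += periods * period;
		rt_avg >>= periods < 64 ? periods : 63;
	}

	int main(void)
	{
		const uint64_t period = 500000;		/* arbitrary period, in ns */
		const uint64_t now = 5 * period + 100;	/* ~5 periods elapsed */

		rt_avg = 1024; age_stamp = 0;
		decay_looped(now, period);
		printf("looped:   rt_avg=%" PRIu64 " age_stamp=%" PRIu64 "\n",
		       rt_avg, age_stamp);

		rt_avg = 1024; age_stamp = 0;
		decay_unlooped(now, period);
		printf("unlooped: rt_avg=%" PRIu64 " age_stamp=%" PRIu64 "\n",
		       rt_avg, age_stamp);
		return 0;
	}

Both calls print rt_avg=32 and age_stamp=2500000 for these inputs; the point of
the patch is only to compare their cost, which the profiler numbers above show
in the new version's favour.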

Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Not-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---
 kernel/sched/core.c  | 16 ++++++++++++++++
 kernel/sched/fair.c  |  7 ++++++-
 kernel/sched/sched.h |  8 +++++++-
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0c0578a..5410e1bb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -663,6 +663,22 @@ bool sched_can_stop_tick(struct rq *rq)
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
+void sched_avg_update_old(struct rq *rq)
+{
+	s64 period = sched_avg_period();
+
+	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
+		/*
+		 * Inline assembly required to prevent the compiler
+		 * optimising this loop into a divmod call.
+		 * See __iter_div_u64_rem() for another example of this.
+		 */
+		asm("" : "+rm" (rq->age_stamp));
+		rq->age_stamp += period;
+		rq->rt_avg /= 2;
+	}
+}
+
 void sched_avg_update(struct rq *rq)
 {
 	s64 period = sched_avg_period();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c6dd8ba..1b487bb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4607,6 +4607,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
  * term.
  */
+DEFINE_PER_CPU(int, cpu_lodd);
 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 			    unsigned long pending_updates)
 {
@@ -4647,7 +4648,11 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
 	}
 
-	sched_avg_update(this_rq);
+	if (__this_cpu_read(cpu_lodd) % 2)
+		sched_avg_update(this_rq);
+	else
+		sched_avg_update_old(this_rq);
+	__this_cpu_add(cpu_lodd, 1);
 }
 
 /* Used instead of source_load when we know the type == 0 */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 72f1f30..b7e197b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1427,6 +1427,7 @@ static inline int hrtick_enabled(struct rq *rq)
 #endif /* CONFIG_SCHED_HRTICK */
 
 #ifdef CONFIG_SMP
+extern void sched_avg_update_old(struct rq *rq);
 extern void sched_avg_update(struct rq *rq);
 
 #ifndef arch_scale_freq_capacity
@@ -1448,10 +1449,15 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 }
 #endif
 
+DECLARE_PER_CPU(int, cpu_lodd);
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
-	sched_avg_update(rq);
+	if (__this_cpu_read(cpu_lodd) % 2)
+		sched_avg_update(rq);
+	else
+		sched_avg_update_old(rq);
+	__this_cpu_add(cpu_lodd, 1);
 }
 #else
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-- 
2.7.0
