From: Peter Zijlstra <peterz@infradead.org>
To: Steve Muckle <steve.muckle@linaro.org>
Cc: Ingo Molnar <mingo@redhat.com>,
	"Rafael J. Wysocki" <rafael@kernel.org>,
	linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Morten Rasmussen <morten.rasmussen@arm.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Juri Lelli <Juri.Lelli@arm.com>,
	Patrick Bellasi <patrick.bellasi@arm.com>,
	Michael Turquette <mturquette@baylibre.com>,
	Byungchul Park <byungchul.park@lge.com>
Subject: Re: [RFC PATCH] sched/fair: call cpufreq hook in additional paths
Date: Thu, 31 Mar 2016 09:59:51 +0200
Message-ID: <20160331075951.GG3408@twins.programming.kicks-ass.net>
In-Reply-To: <1458858367-2831-1-git-send-email-smuckle@linaro.org>

On Thu, Mar 24, 2016 at 03:26:07PM -0700, Steve Muckle wrote:
> Note that this patch depends on the 2 patches I sent several days ago:
> http://thread.gmane.org/gmane.linux.kernel/2181498

Right; I had something similar for a little while..

> Unfortunately this means that in the migration case,
> enqueue_entity_load_avg() will end up calling the cpufreq hook twice -
> once via update_cfs_rq_load_avg() and once via
> attach_entity_load_avg(). 

Right; this is the point where I gave up when I looked at that.

> This should ideally get filtered out before
> the cpufreq driver is invoked but nevertheless is wasteful. Possible
> alternatives to this are
> 
>  - moving the cpufreq hook from update_cfs_rq_load_avg() into
>    its callers (there are three) and also putting the hook
>    in attach_task_cfs_rq and detach_task_cfs_rq, resulting in
>    five call sites of the hook in fair.c as opposed to three

It's worse; you get a whole nest of extra logic to determine when to call
what. See below (admittedly it's still early, so I might have made it more
complex than it needed to be..).

> 
>  - passing an argument into attach_entity_load_avg() to indicate                                                 
>    whether calling the cpufreq hook is necessary
> 
> Both of these are ugly in their own way but would avoid a runtime
> cost. Opinions welcome.
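
(For illustration, a minimal userspace sketch of that second alternative.
Every name below is invented; none of this is from the posted patches. The
idea is only that the caller tells attach_entity_load_avg() whether the
cpufreq hook still needs to fire, so the migration path does not invoke it
twice:)

#include <stdbool.h>
#include <stdio.h>

static void cpufreq_hook(void)
{
	printf("cpufreq hook invoked\n");
}

/* stand-in: pretend the cfs_rq update decayed and already poked cpufreq */
static bool update_cfs_rq_load_avg(void)
{
	cpufreq_hook();
	return true;
}

static void attach_entity_load_avg(bool call_hook)
{
	/* ... fold the entity's util into the cfs_rq ... */
	if (call_hook)
		cpufreq_hook();
}

static void enqueue_entity_load_avg(void)
{
	bool hook_called = update_cfs_rq_load_avg();

	/* only let attach poke cpufreq if the update above did not */
	attach_entity_load_avg(!hook_called);
}

int main(void)
{
	enqueue_entity_load_avg();	/* the hook fires exactly once */
	return 0;
}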

Lemme see what this would look like while I throw the below into the bit
bucket.



---
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2854,23 +2854,23 @@ static inline void cfs_rq_util_change(st
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
-	int decayed, removed_load = 0, removed_util = 0;
+	int ret = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
-		removed_load = 1;
+		ret |= 1 << 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
 		sa->util_avg = max_t(long, sa->util_avg - r, 0);
 		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
-		removed_util = 1;
+		ret |= 1 << 2;
 	}
 
-	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+	ret |= __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
 		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
 
 #ifndef CONFIG_64BIT
@@ -2878,10 +2878,7 @@ static inline int update_cfs_rq_load_avg
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (decayed || removed_util)
-		cfs_rq_util_change(cfs_rq);
-
-	return decayed || removed_load;
+	return ret;
 }
 
 /* Update task and its cfs_rq load average */
@@ -2891,6 +2888,7 @@ static inline void update_load_avg(struc
 	u64 now = cfs_rq_clock_task(cfs_rq);
 	struct rq *rq = rq_of(cfs_rq);
 	int cpu = cpu_of(rq);
+	int updated;
 
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
@@ -2900,9 +2898,14 @@ static inline void update_load_avg(struc
 			  se->on_rq * scale_load_down(se->load.weight),
 			  cfs_rq->curr == se, NULL);
 
-	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+	updated = update_cfs_rq_load_avg(now, cfs_rq);
+
+	if (updated & 5)
+		cfs_rq_util_change(cfs_rq);
+
+	if (update_tg && (updated & 3))
 		update_tg_load_avg(cfs_rq, 0);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -2953,7 +2956,7 @@ enqueue_entity_load_avg(struct cfs_rq *c
 {
 	struct sched_avg *sa = &se->avg;
 	u64 now = cfs_rq_clock_task(cfs_rq);
-	int migrated, decayed;
+	int migrated, updated;
 
 	migrated = !sa->last_update_time;
 	if (!migrated) {
@@ -2962,16 +2965,18 @@ enqueue_entity_load_avg(struct cfs_rq *c
 			cfs_rq->curr == se, NULL);
 	}
 
-	decayed = update_cfs_rq_load_avg(now, cfs_rq);
+	updated = update_cfs_rq_load_avg(now, cfs_rq);
 
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
 
-	if (migrated)
+	if (migrated) {
 		attach_entity_load_avg(cfs_rq, se);
-
-	if (decayed || migrated)
-		update_tg_load_avg(cfs_rq, 0);
+		if (updated & 3)
+			update_tg_load_avg(cfs_rq, 0);
+	} else if (updated & 5) {
+		cfs_rq_util_change(cfs_rq);
+	}
 }
 
 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
@@ -6157,6 +6162,7 @@ static void update_blocked_averages(int
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
+	int updated;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
@@ -6170,7 +6176,10 @@ static void update_blocked_averages(int
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		updated = update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+		if (updated & 5)
+			cfs_rq_util_change(cfs_rq);
+		if (updated & 3)
 			update_tg_load_avg(cfs_rq, 0);
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -6228,10 +6237,13 @@ static inline void update_blocked_averag
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	unsigned long flags;
+	int updated;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	updated = update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	if (updated & 5)
+		cfs_rq_util_change(cfs_rq);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 

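(Spelling out the magic masks in the diff above: bit 0 is the decay result
from __update_load_avg(), bit 1 the removed-load flag, bit 2 the
removed-util flag, so "updated & 5" means "decayed or util removed" and
"updated & 3" means "decayed or load removed". A small standalone sketch of
the same decode; the flag names are made up, the patch itself just uses the
bare bitmask:)

#include <stdio.h>

#define UPD_DECAYED		(1 << 0)	/* __update_load_avg() reported decay */
#define UPD_REMOVED_LOAD	(1 << 1)	/* removed_load_avg was folded in */
#define UPD_REMOVED_UTIL	(1 << 2)	/* removed_util_avg was folded in */

static void consume(int updated)
{
	/* updated & 5: utilization changed, so the cpufreq hook should run */
	if (updated & (UPD_DECAYED | UPD_REMOVED_UTIL))
		printf("cfs_rq_util_change()\n");

	/* updated & 3: load changed, so the task group average needs updating */
	if (updated & (UPD_DECAYED | UPD_REMOVED_LOAD))
		printf("update_tg_load_avg()\n");
}

int main(void)
{
	consume(UPD_REMOVED_UTIL);			/* only util removed: hook only */
	consume(UPD_DECAYED | UPD_REMOVED_LOAD);	/* decayed + load removed: both */
	return 0;
}
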
Thread overview: 7+ messages
2016-03-24 22:26 [RFC PATCH] sched/fair: call cpufreq hook in additional paths Steve Muckle
2016-03-31  7:59 ` Peter Zijlstra [this message]
2016-03-31  9:14   ` Peter Zijlstra
2016-03-31 11:50     ` Rafael J. Wysocki
2016-04-01 21:40     ` Steve Muckle
2016-04-01 21:53       ` Peter Zijlstra
2016-04-23 12:58 ` [tip:sched/core] sched/fair: Call " tip-bot for Steve Muckle
