From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1162238AbbBDSnj (ORCPT );
	Wed, 4 Feb 2015 13:43:39 -0500
Received: from foss-mx-na.foss.arm.com ([217.140.108.86]:41521 "EHLO
	foss-mx-na.foss.arm.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1161444AbbBDSax (ORCPT );
	Wed, 4 Feb 2015 13:30:53 -0500
From: Morten Rasmussen <morten.rasmussen@arm.com>
To: peterz@infradead.org, mingo@redhat.com
Cc: vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	yuyang.du@intel.com, preeti@linux.vnet.ibm.com, mturquette@linaro.org,
	nico@linaro.org, rjw@rjwysocki.net, juri.lelli@arm.com,
	linux-kernel@vger.kernel.org, Paul Turner, Ben Segall
Subject: [RFCv3 PATCH 04/48] sched: Make sched entity usage tracking frequency-invariant
Date: Wed, 4 Feb 2015 18:30:41 +0000
Message-Id: <1423074685-6336-5-git-send-email-morten.rasmussen@arm.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1423074685-6336-1-git-send-email-morten.rasmussen@arm.com>
References: <1423074685-6336-1-git-send-email-morten.rasmussen@arm.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org

Apply the frequency scale-invariance correction factor to usage tracking.
Each segment of the running_avg_sum geometric series is now scaled by the
current frequency, so the utilization_avg_contrib of each entity will be
invariant with frequency scaling. As a result, utilization_load_avg, which
is the sum of utilization_avg_contrib, becomes invariant too. So the usage
level returned by get_cpu_usage() stays relative to the maximum frequency,
as does the cpu_capacity it is compared against.

Then, we want to keep the load tracking values in a 32-bit type, which
implies that the maximum value of {runnable|running}_avg_sum must be lower
than 2^32/88761 = 48388 (88761 is the maximum weight of a task). As
LOAD_AVG_MAX = 47742, arch_scale_freq_capacity() must return a value less
than (48388/47742) << SCHED_CAPACITY_SHIFT = 1037 (SCHED_CAPACITY_SCALE =
1024). So we define the range as [0..SCHED_CAPACITY_SCALE] in order to
avoid overflow.

cc: Paul Turner
cc: Ben Segall
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 35fd296..b6fb7c4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2472,6 +2472,8 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2500,7 +2502,7 @@ static u32 __compute_runnable_contrib(u64 n)
  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int __update_entity_runnable_avg(u64 now,
+static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 							struct sched_avg *sa,
 							int runnable,
 							int running)
@@ -2508,6 +2510,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	u64 delta, periods;
 	u32 runnable_contrib;
 	int delta_w, decayed = 0;
+	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
 	delta = now - sa->last_runnable_update;
 	/*
@@ -2543,7 +2546,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += delta_w;
 		if (running)
-			sa->running_avg_sum += delta_w;
+			sa->running_avg_sum += delta_w * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += delta_w;
 
 		delta -= delta_w;
@@ -2564,7 +2568,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += runnable_contrib;
 		if (running)
-			sa->running_avg_sum += runnable_contrib;
+			sa->running_avg_sum += runnable_contrib * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += runnable_contrib;
 	}
 
@@ -2572,7 +2577,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	if (runnable)
 		sa->runnable_avg_sum += delta;
 	if (running)
-		sa->running_avg_sum += delta;
+		sa->running_avg_sum += delta * scale_freq
+			>> SCHED_CAPACITY_SHIFT;
 	sa->avg_period += delta;
 
 	return decayed;
@@ -2680,8 +2686,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable,
-			runnable);
+	__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
+			runnable, runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -2759,6 +2765,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	long contrib_delta, utilization_delta;
+	int cpu = cpu_of(rq_of(cfs_rq));
 	u64 now;
 
 	/*
@@ -2770,7 +2777,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 	else
 		now = cfs_rq_clock_task(group_cfs_rq(se));
 
-	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+	if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
 					  cfs_rq->curr == se))
 		return;
 
-- 
1.9.1
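For readers skimming the diff: every slice of running time (delta_w,
runnable_contrib, or the remaining delta) is now multiplied by the current
frequency capacity and shifted down by SCHED_CAPACITY_SHIFT before being
accumulated into running_avg_sum. A minimal standalone sketch of that
arithmetic, not part of the patch (the helper name scale_running_delta()
is made up for illustration; the constants mirror kernel/sched/sched.h):

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1 << SCHED_CAPACITY_SHIFT)	/* 1024 */

	/* Mirrors the patched accumulation:
	 * sa->running_avg_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT; */
	static unsigned long scale_running_delta(unsigned long delta,
						 unsigned long scale_freq)
	{
		return delta * scale_freq >> SCHED_CAPACITY_SHIFT;
	}

	int main(void)
	{
		unsigned long delta = 1024;	/* one accrued slice of running time */

		/* At max frequency the slice is accumulated unchanged: 1024. */
		printf("full speed: %lu\n",
		       scale_running_delta(delta, SCHED_CAPACITY_SCALE));

		/* At half the max frequency only half the work was done,
		 * so only half the slice is accumulated: 512. */
		printf("half speed: %lu\n",
		       scale_running_delta(delta, SCHED_CAPACITY_SCALE / 2));
		return 0;
	}

This is what makes running_avg_sum, and hence utilization_avg_contrib,
frequency-invariant: the same amount of work contributes the same
utilization regardless of the frequency it ran at.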
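The overflow bound quoted in the changelog can likewise be checked with a
few lines of standalone C (an illustration only; the constants are taken
from the changelog):

	#include <stdio.h>

	#define LOAD_AVG_MAX		47742	/* max of the decayed geometric series */
	#define MAX_TASK_WEIGHT		88761	/* weight of a nice -20 task */
	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1 << SCHED_CAPACITY_SHIFT)

	int main(void)
	{
		/* weight * avg_sum must fit in 32 bits, so avg_sum must stay below: */
		unsigned long long max_avg_sum = (1ULL << 32) / MAX_TASK_WEIGHT;

		/* largest scale factor that keeps the scaled series below that bound */
		unsigned long long max_scale =
			(max_avg_sum << SCHED_CAPACITY_SHIFT) / LOAD_AVG_MAX;

		printf("avg_sum bound: %llu\n", max_avg_sum);	/* 48388 */
		printf("scale bound:   %llu\n", max_scale);	/* 1037 */
		printf("clamped range: [0..%d]\n", SCHED_CAPACITY_SCALE);
		return 0;
	}

Since the clamped maximum of 1024 is below the computed bound of 1037,
weight * running_avg_sum cannot overflow a 32-bit type.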