From: Thara Gopinath <thara.gopinath@linaro.org>
To: linux-kernel@vger.kernel.org, mingo@redhat.com,
	peterz@infradead.org, rui.zhang@intel.com
Cc: gregkh@linuxfoundation.org, rafael@kernel.org,
	amit.kachhap@gmail.com, viresh.kumar@linaro.org,
	javi.merino@kernel.org, edubezval@gmail.com,
	daniel.lezcano@linaro.org, linux-pm@vger.kernel.org,
	quentin.perret@arm.com, ionela.voinescu@arm.com,
	vincent.guittot@linaro.org
Subject: [RFC PATCH 1/7] sched/pelt.c: Add option to make load and util calculations frequency invariant
Date: Tue,  9 Oct 2018 12:24:56 -0400	[thread overview]
Message-ID: <1539102302-9057-2-git-send-email-thara.gopinath@linaro.org> (raw)
In-Reply-To: <1539102302-9057-1-git-send-email-thara.gopinath@linaro.org>

Add a parameter to accumulate_sum() to make the frequency adjustment
of load and utilization optional. When tracking rt/dl load/util, it
is correct to scale the contribution by the current CPU frequency.
Thermal pressure (capacity lost to a capped maximum frequency), on
the other hand, is frequency invariant and must not be scaled.
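
To illustrate the intent: a caller tracking a frequency-invariant
signal would pass freq_adjusted = 0, while all existing callers keep
passing 1. A minimal sketch of a hypothetical thermal pressure caller
(update_thermal_load_avg() and rq->avg_thermal are illustrative names
only, not part of this patch):

	/*
	 * Thermal pressure is expressed in units of capacity lost to
	 * a capped max frequency, so it is already frequency
	 * invariant: pass freq_adjusted = 0 to skip the cap_scale()
	 * step in accumulate_sum().
	 */
	int update_thermal_load_avg(u64 now, struct rq *rq, int capped)
	{
		if (___update_load_sum(now, rq->cpu, &rq->avg_thermal,
					capped,
					capped,
					capped, 0)) {
			___update_load_avg(&rq->avg_thermal, 1, 1);
			return 1;
		}
		return 0;
	}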

Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
---
 kernel/sched/pelt.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
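
For reference, cap_scale() is the existing helper that scales a PELT
contribution by the current frequency relative to maximum capacity;
skipping it leaves the contribution frequency invariant. A sketch of
the helper as defined in the scheduler code at the time of this patch:

	/* scale v by s/SCHED_CAPACITY_SCALE, i.e. (v * s) >> 10 */
	#define cap_scale(v, s)	((v)*(s) >> SCHED_CAPACITY_SHIFT)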

diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0..05b8798 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -107,7 +107,8 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
  */
 static __always_inline u32
 accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
-	       unsigned long load, unsigned long runnable, int running)
+	       unsigned long load, unsigned long runnable, int running,
+	       int freq_adjusted)
 {
 	unsigned long scale_freq, scale_cpu;
 	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
@@ -137,7 +138,8 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
 	}
 	sa->period_contrib = delta;
 
-	contrib = cap_scale(contrib, scale_freq);
+	if (freq_adjusted)
+		contrib = cap_scale(contrib, scale_freq);
 	if (load)
 		sa->load_sum += load * contrib;
 	if (runnable)
@@ -178,7 +180,8 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
  */
 static __always_inline int
 ___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
-		  unsigned long load, unsigned long runnable, int running)
+		  unsigned long load, unsigned long runnable, int running,
+		  int freq_adjusted)
 {
 	u64 delta;
 
@@ -221,7 +224,8 @@ ___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
 	 * Step 1: accumulate *_sum since last_update_time. If we haven't
 	 * crossed period boundaries, finish.
 	 */
-	if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+	if (!accumulate_sum(delta, cpu, sa, load, runnable, running,
+			    freq_adjusted))
 		return 0;
 
 	return 1;
@@ -272,7 +276,7 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
 	if (entity_is_task(se))
 		se->runnable_weight = se->load.weight;
 
-	if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+	if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0, 1)) {
 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
 		return 1;
 	}
@@ -286,7 +290,7 @@ int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_e
 		se->runnable_weight = se->load.weight;
 
 	if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
-				cfs_rq->curr == se)) {
+				cfs_rq->curr == se, 1)) {
 
 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
 		cfs_se_util_change(&se->avg);
@@ -301,7 +305,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
 	if (___update_load_sum(now, cpu, &cfs_rq->avg,
 				scale_load_down(cfs_rq->load.weight),
 				scale_load_down(cfs_rq->runnable_weight),
-				cfs_rq->curr != NULL)) {
+				cfs_rq->curr != NULL, 1)) {
 
 		___update_load_avg(&cfs_rq->avg, 1, 1);
 		return 1;
@@ -326,7 +330,7 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 	if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
 				running,
 				running,
-				running)) {
+				running, 1)) {
 
 		___update_load_avg(&rq->avg_rt, 1, 1);
 		return 1;
@@ -349,7 +353,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 	if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
 				running,
 				running,
-				running)) {
+				running, 1)) {
 
 		___update_load_avg(&rq->avg_dl, 1, 1);
 		return 1;
@@ -385,11 +389,11 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 	ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
 				0,
 				0,
-				0);
+				0, 1);
 	ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
 				1,
 				1,
-				1);
+				1, 1);
 
 	if (ret)
 		___update_load_avg(&rq->avg_irq, 1, 1);
-- 
2.1.4


Thread overview: 67+ messages
2018-10-09 16:24 ` [RFC PATCH 0/7] Introduce thermal pressure Thara Gopinath
2018-10-09 16:24   ` Thara Gopinath [this message]
2018-10-09 16:24   ` [RFC PATCH 2/7] sched/pelt.c: Add support to track " Thara Gopinath
2018-10-09 16:24   ` [RFC PATCH 3/7] sched: Add infrastructure to store and update instantaneous " Thara Gopinath
2018-10-09 16:24   ` [RFC PATCH 4/7] sched: Initialize per cpu thermal pressure structure Thara Gopinath
2018-10-09 16:25   ` [RFC PATCH 5/7] sched/fair: Enable CFS periodic tick to update thermal pressure Thara Gopinath
2018-12-04 15:43     ` Vincent Guittot
2018-10-09 16:25   ` [RFC PATCH 6/7] sched/fair: update cpu_capcity to reflect " Thara Gopinath
2018-10-10  5:57     ` Javi Merino
2018-10-10 14:22       ` Thara Gopinath
2018-10-09 16:25   ` [RFC PATCH 7/7] thermal/cpu-cooling: Update thermal pressure in case of a maximum frequency capping Thara Gopinath
2018-10-10  5:44   ` [RFC PATCH 0/7] Introduce thermal pressure Javi Merino
2018-10-10 14:15     ` Thara Gopinath
2018-10-10  6:17   ` Ingo Molnar
2018-10-10  8:29     ` Quentin Perret
2018-10-10  8:50       ` Vincent Guittot
2018-10-10  9:55         ` Quentin Perret
2018-10-10 10:14           ` Vincent Guittot
2018-10-10 10:36             ` Quentin Perret
2018-10-10 12:04               ` Vincent Guittot
2018-10-10 12:23                 ` Juri Lelli
2018-10-10 12:34                   ` Vincent Guittot
2018-10-10 12:50                     ` Juri Lelli
2018-10-10 13:08                       ` Vincent Guittot
2018-10-10 13:34                         ` Juri Lelli
2018-10-10 13:38                           ` Vincent Guittot
2018-10-10 17:08                           ` Thara Gopinath
2018-10-10 13:11                       ` Quentin Perret
2018-10-10 13:05                 ` Quentin Perret
2018-10-10 13:27                   ` Vincent Guittot
2018-10-10 13:47                     ` Quentin Perret
2018-10-10 15:19                       ` Vincent Guittot
2018-10-10 16:15                       ` Ionela Voinescu
2018-10-10 17:03           ` Thara Gopinath
2018-10-10 15:43     ` Thara Gopinath
2018-10-16  7:33       ` Ingo Molnar
2018-10-16  9:28         ` Lukasz Luba
2018-10-17 16:21         ` Thara Gopinath
2018-10-18  6:48           ` Ingo Molnar
2018-10-18  7:08             ` Rafael J. Wysocki
2018-10-18  7:50               ` Ingo Molnar
2018-10-18  8:14                 ` Rafael J. Wysocki
2018-10-18  9:35                   ` [PATCH 1/2] sched/cpufreq: Reorganize the cpufreq files Daniel Lezcano
2018-10-18  9:35                     ` [PATCH 2/2] sched/cpufreq: Add the SPDX tags Daniel Lezcano
2018-10-18  9:42                     ` [PATCH 1/2] sched/cpufreq: Reorganize the cpufreq files Rafael J. Wysocki
2018-10-18  9:54                       ` Daniel Lezcano
2018-10-18 10:06                         ` Rafael J. Wysocki
2018-10-18 10:13                           ` Daniel Lezcano
2018-10-18  9:45                     ` Daniel Lezcano
2018-10-19  5:24                     ` kbuild test robot
2018-10-19  5:52                     ` kbuild test robot
2018-10-18  9:44                   ` [PATCH V2 " Daniel Lezcano
2018-10-18  9:44                     ` [PATCH V2 2/2] sched/cpufreq: Add the SPDX tags Daniel Lezcano
2018-10-18 16:17             ` [RFC PATCH 0/7] Introduce thermal pressure Thara Gopinath
2018-10-19  8:02               ` Ingo Molnar
2018-10-19 11:29                 ` Valentin Schneider
2018-10-10 15:35   ` Lukasz Luba
2018-10-10 16:54     ` Daniel Lezcano
2018-10-11  7:35       ` Lukasz Luba
2018-10-11  8:23         ` Daniel Lezcano
2018-10-12  9:37           ` Lukasz Luba
2018-10-10 17:30     ` Thara Gopinath
2018-10-11 11:10       ` Lukasz Luba
2018-10-16 17:11         ` Vincent Guittot
2018-10-17 16:24           ` Thara Gopinath
2018-10-18  8:00             ` Lukasz Luba
2018-10-18  8:12           ` Lukasz Luba
