From: Dietmar Eggemann <dietmar.eggemann@arm.com>
To: Ingo Molnar <mingo@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Vincent Guittot <vincent.guittot@linaro.org>,
Morten Rasmussen <morten.rasmussen@arm.com>,
Vincent Donnefort <vdonnefort@google.com>
Cc: Quentin Perret <qperret@google.com>,
Patrick Bellasi <patrick.bellasi@matbug.net>,
Abhijeet Dharmapurikar <adharmap@quicinc.com>,
Jian-Min <Jian-Min.Liu@mediatek.com>,
Qais Yousef <qais.yousef@arm.com>,
linux-kernel@vger.kernel.org
Subject: [RFC PATCH 1/1] sched/pelt: Introduce PELT multiplier
Date: Mon, 29 Aug 2022 07:54:50 +0200 [thread overview]
Message-ID: <20220829055450.1703092-2-dietmar.eggemann@arm.com> (raw)
In-Reply-To: <20220829055450.1703092-1-dietmar.eggemann@arm.com>
From: Vincent Donnefort <vincent.donnefort@arm.com>
The new sysctl sched_pelt_multiplier allows a user to set a clock
multiplier to x2 or x4 (x1 being the default). This clock multiplier
artificially speeds up PELT ramp up/down, similarly to using a faster
half-life than the default 32ms.
- x1: 32ms half-life
- x2: 16ms half-life
- x4: 8ms half-life
Internally, a new clock is created: rq->clock_task_mult. It sits in the
clock hierarchy between rq->clock_task and rq->clock_pelt.
Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/core.c | 2 +-
kernel/sched/pelt.c | 60 ++++++++++++++++++++++++++++++++++++++++++++
kernel/sched/pelt.h | 42 ++++++++++++++++++++++++++++---
kernel/sched/sched.h | 1 +
4 files changed, 100 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 603a80ec9b0e..6203cead4ad3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -727,7 +727,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
- update_rq_clock_pelt(rq, delta);
+ update_rq_clock_task_mult(rq, delta);
}
void update_rq_clock(struct rq *rq)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 9adfc4744544..9fa853a64269 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -472,3 +472,63 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
#endif
+
+__read_mostly unsigned int sched_pelt_lshift;
+
+#ifdef CONFIG_SYSCTL
+static unsigned int sysctl_sched_pelt_multiplier = 1;
+
+int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ static DEFINE_MUTEX(mutex);
+ unsigned int old;
+ int ret;
+
+ mutex_lock(&mutex);
+ old = sysctl_sched_pelt_multiplier;
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret)
+ goto undo;
+ if (!write)
+ goto done;
+
+ switch (sysctl_sched_pelt_multiplier) {
+ case 1:
+ fallthrough;
+ case 2:
+ fallthrough;
+ case 4:
+ WRITE_ONCE(sched_pelt_lshift,
+ sysctl_sched_pelt_multiplier >> 1);
+ goto done;
+ default:
+ ret = -EINVAL;
+ }
+
+undo:
+ sysctl_sched_pelt_multiplier = old;
+done:
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+
+static struct ctl_table sched_pelt_sysctls[] = {
+ {
+ .procname = "sched_pelt_multiplier",
+ .data = &sysctl_sched_pelt_multiplier,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_pelt_multiplier,
+ },
+ {}
+};
+
+static int __init sched_pelt_sysctl_init(void)
+{
+ register_sysctl_init("kernel", sched_pelt_sysctls);
+ return 0;
+}
+late_initcall(sched_pelt_sysctl_init);
+#endif
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 3a0e0dc28721..9b35b5072bae 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -61,6 +61,14 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
+static inline u64 rq_clock_task_mult(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+ assert_clock_updated(rq);
+
+ return rq->clock_task_mult;
+}
+
static inline u64 rq_clock_pelt(struct rq *rq)
{
lockdep_assert_rq_held(rq);
@@ -72,7 +80,7 @@ static inline u64 rq_clock_pelt(struct rq *rq)
/* The rq is idle, we can sync to clock_task */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
- rq->clock_pelt = rq_clock_task(rq);
+ rq->clock_pelt = rq_clock_task_mult(rq);
u64_u32_store(rq->clock_idle, rq_clock(rq));
/* Paired with smp_rmb in migrate_se_pelt_lag() */
@@ -121,6 +129,27 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
rq->clock_pelt += delta;
}
+extern unsigned int sched_pelt_lshift;
+
+/*
+ * absolute time |1 |2 |3 |4 |5 |6 |
+ * @ mult = 1 --------****************--------****************-
+ * @ mult = 2 --------********----------------********---------
+ * @ mult = 4 --------****--------------------****-------------
+ * clock task mult
+ * @ mult = 2 | | |2 |3 | | | | |5 |6 | | |
+ * @ mult = 4 | | | | |2|3| | | | | | | | | | |5|6| | | | | | |
+ *
+ */
+static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta)
+{
+ delta <<= READ_ONCE(sched_pelt_lshift);
+
+ rq->clock_task_mult += delta;
+
+ update_rq_clock_pelt(rq, delta);
+}
+
/*
* When rq becomes idle, we have to check if it has lost idle time
* because it was fully busy. A rq is fully used when the /Sum util_sum
@@ -147,7 +176,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq)
* rq's clock_task.
*/
if (util_sum >= divider)
- rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+ rq->lost_idle_time += rq_clock_task_mult(rq) - rq->clock_pelt;
_update_idle_rq_clock_pelt(rq);
}
@@ -218,13 +247,18 @@ update_irq_load_avg(struct rq *rq, u64 running)
return 0;
}
-static inline u64 rq_clock_pelt(struct rq *rq)
+static inline u64 rq_clock_task_mult(struct rq *rq)
{
return rq_clock_task(rq);
}
+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+ return rq_clock_task_mult(rq);
+}
+
static inline void
-update_rq_clock_pelt(struct rq *rq, s64 delta) { }
+update_rq_clock_task_mult(struct rq *rq, s64 delta) { }
static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index da17be6f27fd..62fc3cf10ea7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1007,6 +1007,7 @@ struct rq {
u64 clock;
/* Ensure that all clocks are in the same cache line */
u64 clock_task ____cacheline_aligned;
+ u64 clock_task_mult;
u64 clock_pelt;
unsigned long lost_idle_time;
u64 clock_pelt_idle;
--
2.25.1
next prev parent reply other threads:[~2022-08-29 5:56 UTC|newest]
Thread overview: 61+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-29 5:54 [RFC PATCH 0/1] sched/pelt: Change PELT halflife at runtime Dietmar Eggemann
2022-08-29 5:54 ` Dietmar Eggemann [this message]
2022-08-29 8:08 ` [RFC PATCH 1/1] sched/pelt: Introduce PELT multiplier Peter Zijlstra
2022-08-29 10:02 ` Peter Zijlstra
2022-08-29 10:13 ` Vincent Guittot
2022-08-29 14:23 ` Quentin Perret
2022-08-29 14:34 ` Peter Zijlstra
2022-08-29 15:31 ` Quentin Perret
2022-08-29 15:48 ` Quentin Perret
2022-09-02 7:53 ` Dietmar Eggemann
2022-09-02 8:45 ` Peter Zijlstra
2022-09-06 5:49 ` Vincent Guittot
2022-09-08 6:50 ` Dietmar Eggemann
2022-09-02 7:53 ` Dietmar Eggemann
2022-09-02 8:45 ` Peter Zijlstra
2022-09-20 14:07 ` [RFC PATCH 0/1] sched/pelt: Change PELT halflife at runtime Jian-Min Liu
2022-09-28 17:09 ` Dietmar Eggemann
2022-09-29 9:47 ` Peter Zijlstra
2022-09-29 11:07 ` Dietmar Eggemann
2022-09-29 11:10 ` Kajetan Puchalski
2022-09-29 11:21 ` Peter Zijlstra
2022-09-29 14:41 ` Kajetan Puchalski
2022-10-03 22:57 ` Wei Wang
2022-10-04 9:33 ` Dietmar Eggemann
2022-10-05 16:57 ` Wei Wang
2022-11-07 13:41 ` Peter Zijlstra
2022-11-08 19:48 ` Qais Yousef
2022-11-09 15:49 ` Peter Zijlstra
2022-11-10 13:25 ` Qais Yousef
2023-02-07 10:29 ` Dietmar Eggemann
2023-02-09 16:16 ` Vincent Guittot
2023-02-17 13:54 ` Dietmar Eggemann
2023-02-20 13:54 ` Vincent Guittot
2023-02-21 9:29 ` Vincent Guittot
2023-02-22 20:28 ` Dietmar Eggemann
2023-03-01 10:24 ` Vincent Guittot
2023-02-22 20:13 ` Dietmar Eggemann
2023-03-02 19:36 ` Dietmar Eggemann
2023-02-20 10:13 ` Peter Zijlstra
2023-02-20 13:39 ` Vincent Guittot
2023-02-23 15:37 ` Qais Yousef
2023-03-01 10:39 ` Vincent Guittot
2023-03-01 17:24 ` Qais Yousef
2023-03-02 8:00 ` Vincent Guittot
2023-03-02 19:39 ` Dietmar Eggemann
2023-03-06 19:11 ` Qais Yousef
2023-03-07 13:22 ` Vincent Guittot
2023-03-11 16:55 ` Qais Yousef
2023-03-23 16:29 ` Dietmar Eggemann
2023-04-03 14:45 ` Qais Yousef
2023-04-06 15:58 ` Dietmar Eggemann
2023-04-11 17:51 ` Qais Yousef
2022-11-09 15:18 ` Lukasz Luba
2022-11-10 11:16 ` Dietmar Eggemann
2022-11-10 13:05 ` Peter Zijlstra
2022-11-10 14:59 ` Dietmar Eggemann
2022-11-10 17:51 ` Peter Zijlstra
2022-11-30 18:14 ` Dietmar Eggemann
2022-12-01 13:37 ` Kajetan Puchalski
2022-11-10 12:45 ` Kajetan Puchalski
2022-11-07 9:41 ` Jian-Min Liu (劉建旻)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220829055450.1703092-2-dietmar.eggemann@arm.com \
--to=dietmar.eggemann@arm.com \
--cc=Jian-Min.Liu@mediatek.com \
--cc=adharmap@quicinc.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=morten.rasmussen@arm.com \
--cc=patrick.bellasi@matbug.net \
--cc=peterz@infradead.org \
--cc=qais.yousef@arm.com \
--cc=qperret@google.com \
--cc=vdonnefort@google.com \
--cc=vincent.guittot@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).