* [PATCH] sched/fair: Remove rq->load
@ 2019-04-24 8:45 Dietmar Eggemann
2019-05-06 10:46 ` Dietmar Eggemann
2019-06-03 13:01 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
0 siblings, 2 replies; 4+ messages in thread
From: Dietmar Eggemann @ 2019-04-24 8:45 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel
The CFS class is the only one maintaining and using the CPU wide load
(rq->load(.weight)). The last use case of the CPU wide load in CFS's
set_next_entity() can be replaced by using the load of the CFS class
(rq->cfs.load(.weight)) instead.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
kernel/sched/debug.c | 2 --
kernel/sched/fair.c | 7 ++-----
kernel/sched/sched.h | 2 --
3 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8039d62ae36e..1148f43dbd42 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do { \
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
P(nr_running);
- SEQ_printf(m, " .%-30s: %lu\n", "load",
- rq->load.weight);
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a4d9e14bf138..73a6718f29cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2682,8 +2682,6 @@ static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
struct rq *rq = rq_of(cfs_rq);
@@ -2699,8 +2697,6 @@ static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4096,7 +4092,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* least twice that of our own weight (i.e. dont track it
* when there are only lesser-weight tasks around):
*/
- if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+ if (schedstat_enabled() &&
+ rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efa686eeff26..e4059e81e99c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
- /* capture load from *all* tasks on this CPU: */
- struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
--
2.17.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] sched/fair: Remove rq->load
2019-04-24 8:45 [PATCH] sched/fair: Remove rq->load Dietmar Eggemann
@ 2019-05-06 10:46 ` Dietmar Eggemann
2019-05-06 10:54 ` Peter Zijlstra
2019-06-03 13:01 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
1 sibling, 1 reply; 4+ messages in thread
From: Dietmar Eggemann @ 2019-05-06 10:46 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar; +Cc: linux-kernel
Hi,
On 4/24/19 10:45 AM, Dietmar Eggemann wrote:
> The CFS class is the only one maintaining and using the CPU wide load
> (rq->load(.weight)). The last use case of the CPU wide load in CFS's
> set_next_entity() can be replaced by using the load of the CFS class
> (rq->cfs.load(.weight)) instead.
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
> ---
> kernel/sched/debug.c | 2 --
> kernel/sched/fair.c | 7 ++-----
> kernel/sched/sched.h | 2 --
> 3 files changed, 2 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 8039d62ae36e..1148f43dbd42 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -656,8 +656,6 @@ do { \
> SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
>
> P(nr_running);
> - SEQ_printf(m, " .%-30s: %lu\n", "load",
> - rq->load.weight);
> P(nr_switches);
> P(nr_load_updates);
> P(nr_uninterruptible);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a4d9e14bf138..73a6718f29cc 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2682,8 +2682,6 @@ static void
> account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> update_load_add(&cfs_rq->load, se->load.weight);
> - if (!parent_entity(se))
> - update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
> #ifdef CONFIG_SMP
> if (entity_is_task(se)) {
> struct rq *rq = rq_of(cfs_rq);
> @@ -2699,8 +2697,6 @@ static void
> account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> update_load_sub(&cfs_rq->load, se->load.weight);
> - if (!parent_entity(se))
> - update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
> #ifdef CONFIG_SMP
> if (entity_is_task(se)) {
> account_numa_dequeue(rq_of(cfs_rq), task_of(se));
> @@ -4096,7 +4092,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> * least twice that of our own weight (i.e. dont track it
> * when there are only lesser-weight tasks around):
> */
> - if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
> + if (schedstat_enabled() &&
> + rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
> schedstat_set(se->statistics.slice_max,
> max((u64)schedstat_val(se->statistics.slice_max),
> se->sum_exec_runtime - se->prev_sum_exec_runtime));
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index efa686eeff26..e4059e81e99c 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -830,8 +830,6 @@ struct rq {
> atomic_t nohz_flags;
> #endif /* CONFIG_NO_HZ_COMMON */
>
> - /* capture load from *all* tasks on this CPU: */
> - struct load_weight load;
> unsigned long nr_load_updates;
> u64 nr_switches;
Is there anything else I should do for this patch?
Thanks,
-- Dietmar
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] sched/fair: Remove rq->load
2019-05-06 10:46 ` Dietmar Eggemann
@ 2019-05-06 10:54 ` Peter Zijlstra
0 siblings, 0 replies; 4+ messages in thread
From: Peter Zijlstra @ 2019-05-06 10:54 UTC (permalink / raw)
To: Dietmar Eggemann; +Cc: Ingo Molnar, linux-kernel
On Mon, May 06, 2019 at 12:46:00PM +0200, Dietmar Eggemann wrote:
> Is there anything else I should do for this patch?
Got it now. Thanks!
^ permalink raw reply [flat|nested] 4+ messages in thread
* [tip:sched/core] sched/fair: Remove rq->load
2019-04-24 8:45 [PATCH] sched/fair: Remove rq->load Dietmar Eggemann
2019-05-06 10:46 ` Dietmar Eggemann
@ 2019-06-03 13:01 ` tip-bot for Dietmar Eggemann
1 sibling, 0 replies; 4+ messages in thread
From: tip-bot for Dietmar Eggemann @ 2019-06-03 13:01 UTC (permalink / raw)
To: linux-tip-commits
Cc: mingo, torvalds, dietmar.eggemann, peterz, tglx, hpa, linux-kernel
Commit-ID: f2bedc4705659216bd60948029ad8dfedf923ad9
Gitweb: https://git.kernel.org/tip/f2bedc4705659216bd60948029ad8dfedf923ad9
Author: Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: Wed, 24 Apr 2019 09:45:56 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 11:49:37 +0200
sched/fair: Remove rq->load
The CFS class is the only one maintaining and using the CPU wide load
(rq->load(.weight)). The last use case of the CPU wide load in CFS's
set_next_entity() can be replaced by using the load of the CFS class
(rq->cfs.load(.weight)) instead.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190424084556.604-1-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
kernel/sched/debug.c | 2 --
kernel/sched/fair.c | 7 ++-----
kernel/sched/sched.h | 2 --
3 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f..150043e1d716 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do { \
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
P(nr_running);
- SEQ_printf(m, " .%-30s: %lu\n", "load",
- rq->load.weight);
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8691a8fffe40..08b1cb06f968 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,8 +2686,6 @@ static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
struct rq *rq = rq_of(cfs_rq);
@@ -2703,8 +2701,6 @@ static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4100,7 +4096,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* least twice that of our own weight (i.e. dont track it
* when there are only lesser-weight tasks around):
*/
- if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+ if (schedstat_enabled() &&
+ rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be..c308410675ed 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
- /* capture load from *all* tasks on this CPU: */
- struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
^ permalink raw reply related [flat|nested] 4+ messages in thread
end of thread, other threads:[~2019-06-03 13:01 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-24 8:45 [PATCH] sched/fair: Remove rq->load Dietmar Eggemann
2019-05-06 10:46 ` Dietmar Eggemann
2019-05-06 10:54 ` Peter Zijlstra
2019-06-03 13:01 ` [tip:sched/core] " tip-bot for Dietmar Eggemann
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.