* [PATCH] sched/fair: Remove the force parameter of update_tg_load_avg()
@ 2020-09-24 1:47 Xianting Tian
From: Xianting Tian @ 2020-09-24 1:47 UTC
To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
rostedt, bsegall, mgorman
Cc: linux-kernel, Xianting Tian
In fair.c, some callers use update_tg_load_avg(cfs_rq, 0) while
others use update_tg_load_avg(cfs_rq, false).
update_tg_load_avg() takes a force parameter, but no caller in the
current code ever passes 1 or true, so remove the parameter.
Signed-off-by: Xianting Tian <tian.xianting@h3c.com>
---
kernel/sched/fair.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a68a0536..7056fa97f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void init_entity_runnable_average(struct sched_entity *se)
void post_init_entity_util_avg(struct task_struct *p)
{
}
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */
@@ -3288,7 +3288,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
- * @force: update regardless of how small the difference
*
* This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
* However, because tg->load_avg is a global value there are performance
@@ -3300,7 +3299,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
*
* Updating tg's load_avg is necessary before update_cfs_share().
*/
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
@@ -3310,7 +3309,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
if (cfs_rq->tg == &root_task_group)
return;
- if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
+ if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
}
@@ -3612,7 +3611,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
#else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
@@ -3800,13 +3799,13 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* IOW we're enqueueing a task on a new CPU.
*/
attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
} else if (decayed) {
cfs_rq_util_change(cfs_rq, 0);
if (flags & UPDATE_TG)
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
}
}
@@ -7887,7 +7886,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
struct sched_entity *se;
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
if (cfs_rq == &rq->cfs)
decayed = true;
@@ -10786,7 +10785,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
/* Catch up with the cfs_rq and remove our load when we leave */
update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}
@@ -10805,7 +10804,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
/* Synchronize entity with its cfs_rq */
update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}
--
2.17.1
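The threshold logic this patch leaves behind is worth seeing in isolation.
The sketch below is a minimal userspace mock of the resulting
update_tg_load_avg(), not kernel code: the struct fields are simplified
stand-ins (the kernel reads cfs_rq->avg.load_avg and updates the shared
tg->load_avg with atomic_long_add()). It shows the differential update: a
per-cfs_rq contribution is folded into the group-wide sum only when it has
drifted by more than 1/64 (about 1.5%) of the last published value, which
bounds writes to the contended global. It also shows why dropping force is
harmless: with no caller ever passing true, the "force ||" short-circuit
was dead code.

#include <stdio.h>
#include <stdlib.h>

struct tg { long load_avg; };               /* shared, write-costly value */

struct cfs_rq {
	long load_avg;                      /* current local average      */
	long tg_load_avg_contrib;           /* last value published to tg */
	struct tg *tg;
};

static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->tg_load_avg_contrib;

	/* Publish only when the drift exceeds ~1.5% of the last value. */
	if (labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		cfs_rq->tg->load_avg += delta;  /* atomic_long_add() in the kernel */
		cfs_rq->tg_load_avg_contrib = cfs_rq->load_avg;
	}
}

int main(void)
{
	struct tg tg = { 0 };
	struct cfs_rq rq = { .load_avg = 1024, .tg_load_avg_contrib = 0, .tg = &tg };

	update_tg_load_avg(&rq);    /* delta 1024 > 0: published, tg = 1024   */
	rq.load_avg = 1030;
	update_tg_load_avg(&rq);    /* delta 6 <= 1024/64 = 16: skipped       */
	rq.load_avg = 1100;
	update_tg_load_avg(&rq);    /* delta 76 > 16: published, tg = 1100    */
	printf("tg->load_avg = %ld\n", tg.load_avg);
	return 0;
}

Compiled with any C compiler, this prints tg->load_avg = 1100: the 6-unit
drift is filtered out, while the 76-unit drift crosses the 1/64 threshold
and is published.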
* Re: [PATCH] sched/fair: Remove the force parameter of update_tg_load_avg()
From: Phil Auld @ 2020-09-25 13:17 UTC
To: Xianting Tian
Cc: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
rostedt, bsegall, mgorman, linux-kernel
On Thu, Sep 24, 2020 at 09:47:55AM +0800 Xianting Tian wrote:
> In fair.c, some callers use update_tg_load_avg(cfs_rq, 0) while
> others use update_tg_load_avg(cfs_rq, false).
> update_tg_load_avg() takes a force parameter, but no caller in the
> current code ever passes 1 or true, so remove the parameter.
>
> Signed-off-by: Xianting Tian <tian.xianting@h3c.com>
> [...]
LGTM,
Reviewed-by: Phil Auld <pauld@redhat.com>
* Re: [PATCH] sched/fair: Remove the force parameter of update_tg_load_avg()
From: Vincent Guittot @ 2020-09-25 14:11 UTC
To: Xianting Tian
Cc: Ingo Molnar, Peter Zijlstra, Juri Lelli, Dietmar Eggemann,
Steven Rostedt, Ben Segall, Mel Gorman, linux-kernel
On Thu, 24 Sep 2020 at 03:55, Xianting Tian <tian.xianting@h3c.com> wrote:
>
> In fair.c, some callers use update_tg_load_avg(cfs_rq, 0) while
> others use update_tg_load_avg(cfs_rq, false).
> update_tg_load_avg() takes a force parameter, but no caller in the
> current code ever passes 1 or true, so remove the parameter.
>
> Signed-off-by: Xianting Tian <tian.xianting@h3c.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
> [...]
* [tip: sched/core] sched/fair: Remove the force parameter of update_tg_load_avg()
From: tip-bot2 for Xianting Tian @ 2020-09-29 7:56 UTC
To: linux-tip-commits; +Cc: Xianting Tian, Peter Zijlstra (Intel), x86, LKML
The following commit has been merged into the sched/core branch of tip:
Commit-ID: fe7491580d7c56152ea8d9d3124201191617435d
Gitweb: https://git.kernel.org/tip/fe7491580d7c56152ea8d9d3124201191617435d
Author: Xianting Tian <tian.xianting@h3c.com>
AuthorDate: Thu, 24 Sep 2020 09:47:55 +08:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 25 Sep 2020 14:23:25 +02:00
sched/fair: Remove the force parameter of update_tg_load_avg()
In fair.c, some callers use update_tg_load_avg(cfs_rq, 0) while
others use update_tg_load_avg(cfs_rq, false).
update_tg_load_avg() takes a force parameter, but no caller in the
current code ever passes 1 or true, so remove the parameter.
Signed-off-by: Xianting Tian <tian.xianting@h3c.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200924014755.36253-1-tian.xianting@h3c.com
---
kernel/sched/fair.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9613e5d..b56276a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void init_entity_runnable_average(struct sched_entity *se)
void post_init_entity_util_avg(struct task_struct *p)
{
}
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */
@@ -3293,7 +3293,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
- * @force: update regardless of how small the difference
*
* This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
* However, because tg->load_avg is a global value there are performance
@@ -3305,7 +3304,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
*
* Updating tg's load_avg is necessary before update_cfs_share().
*/
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
@@ -3315,7 +3314,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
if (cfs_rq->tg == &root_task_group)
return;
- if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
+ if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
}
@@ -3617,7 +3616,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
#else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
@@ -3805,13 +3804,13 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* IOW we're enqueueing a task on a new CPU.
*/
attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
} else if (decayed) {
cfs_rq_util_change(cfs_rq, 0);
if (flags & UPDATE_TG)
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
}
}
@@ -7898,7 +7897,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
struct sched_entity *se;
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
- update_tg_load_avg(cfs_rq, 0);
+ update_tg_load_avg(cfs_rq);
if (cfs_rq == &rq->cfs)
decayed = true;
@@ -10797,7 +10796,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
/* Catch up with the cfs_rq and remove our load when we leave */
update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}
@@ -10816,7 +10815,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
/* Synchronize entity with its cfs_rq */
update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}