linux-kernel.vger.kernel.org archive mirror
* [PATCH] sched/fair: remove redundant call to cpufreq_update_util
@ 2020-01-15 10:20 Vincent Guittot
  2020-01-15 10:28 ` Rafael J. Wysocki
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Vincent Guittot @ 2020-01-15 10:20 UTC (permalink / raw)
  To: rjw, viresh.kumar, mingo, peterz, juri.lelli, dietmar.eggemann,
	rostedt, bsegall, mgorman, linux-kernel, linux-pm
  Cc: Vincent Guittot

With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")
update_load_avg() has become the central point for calling cpufreq (not
including the update of blocked load). This change helps to simplify
further the number of call to cpufreq_update_util() and to remove last
redundant ones. With update_load_avg(), we are now sure that
cpufreq_update_util() will be called after every task attachment to a
cfs_rq and especially after propagating this event down to the util_avg of
the root cfs_rq, which is the level that is used by cpufreq governors like
schedutil to set the frequency of a CPU.
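
As a reminder, schedutil reads this level through cpu_util_cfs(), which
looks roughly like this in kernel/sched/sched.h around this time
(simplified sketch for illustration, not the exact source):

	static inline unsigned long cpu_util_cfs(struct rq *rq)
	{
		/* util_avg of the root cfs_rq is what the governor acts on */
		unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

		if (sched_feat(UTIL_EST))
			util = max_t(unsigned long, util,
				     READ_ONCE(rq->cfs.avg.util_est.enqueued));

		return util;
	}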

The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when the
migration happens in a cgroup whereas util_avg of root cfs_rq is not yet
updated and this call is duplicated with the one that happens immediately
after when the migration event reaches the root cfs_rq. The dedicated flag
SCHED_CPUFREQ_MIGRATION is now useless and can be removed. The interface of
attach_entity_load_avg() can also be simplified accordingly.
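
To illustrate, the remaining code path can be sketched as follows (a
simplified excerpt, not the complete functions; the exact changes are in
the diff below):

	static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
	{
		struct rq *rq = rq_of(cfs_rq);

		/* Only the root cfs_rq drives the frequency selection. */
		if (&rq->cfs == cfs_rq)
			cpufreq_update_util(rq, flags);
	}

	/*
	 * Migration/attach path, schematically:
	 *
	 *   update_load_avg(group_cfs_rq, se, UPDATE_TG | DO_ATTACH)
	 *     attach_entity_load_avg(group_cfs_rq, se)
	 *       cfs_rq_util_change(group_cfs_rq, 0)   -> no-op, not the root
	 *   ... propagation up the hierarchy ...
	 *   update_load_avg(&rq->cfs, parent_se, UPDATE_TG)
	 *     cfs_rq_util_change(&rq->cfs, 0)         -> one cpufreq update
	 */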

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 include/linux/sched/cpufreq.h |  1 -
 kernel/sched/fair.c           | 14 +++++++-------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index cc6bcc1e96bc..3ed5aa18593f 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,7 +9,6 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
-#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d170b5da0e3..023aa42aaac7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 		 * For !fair tasks do:
 		 *
 		update_cfs_rq_load_avg(now, cfs_rq);
-		attach_entity_load_avg(cfs_rq, se, 0);
+		attach_entity_load_avg(cfs_rq, se);
 		switched_from_fair(rq, p);
 		 *
 		 * such that the next switched_to_fair() has the
@@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
+	if (&rq->cfs == cfs_rq) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3520,7 +3520,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3556,7 +3556,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, flags);
+	cfs_rq_util_change(cfs_rq, 0);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3614,7 +3614,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 *
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
-		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
+		attach_entity_load_avg(cfs_rq, se);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed) {
@@ -3871,7 +3871,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -10439,7 +10439,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se, 0);
+	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }
-- 
2.7.4



* Re: [PATCH] sched/fair: remove redundant call to cpufreq_update_util
  2020-01-15 10:20 [PATCH] sched/fair: remove redundant call to cpufreq_update_util Vincent Guittot
@ 2020-01-15 10:28 ` Rafael J. Wysocki
  2020-01-15 15:35 ` Peter Zijlstra
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Rafael J. Wysocki @ 2020-01-15 10:28 UTC (permalink / raw)
  To: Vincent Guittot
  Cc: viresh.kumar, mingo, peterz, juri.lelli, dietmar.eggemann,
	rostedt, bsegall, mgorman, linux-kernel, linux-pm

On Wednesday, January 15, 2020 11:20:20 AM CET Vincent Guittot wrote:
> With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")
> update_load_avg() has become the central point for calling cpufreq (not
> including the update of blocked load). This change helps to simplify
> further the number of call to cpufreq_update_util() and to remove last

s/call/calls/

And the patch looks reasonable to me.

> redundant ones. With update_load_avg(), we are now sure that
> cpufreq_update_util() will be called after every task attachment to a
> cfs_rq and especially after propagating this event down to the util_avg of
> the root cfs_rq, which is the level that is used by cpufreq governors like
> schedutil to set the frequency of a CPU.
> 
> The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when the
> migration happens in a cgroup whereas util_avg of root cfs_rq is not yet
> updated and this call is duplicated with the one that happens immediately
> after when the migration event reaches the root cfs_rq. The dedicated flag
> SCHED_CPUFREQ_MIGRATION is now useless and can be removed. The interface of
> attach_entity_load_avg() can also be simplified accordingly.
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

> ---
>  include/linux/sched/cpufreq.h |  1 -
>  kernel/sched/fair.c           | 14 +++++++-------
>  2 files changed, 7 insertions(+), 8 deletions(-)
> 
> diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
> index cc6bcc1e96bc..3ed5aa18593f 100644
> --- a/include/linux/sched/cpufreq.h
> +++ b/include/linux/sched/cpufreq.h
> @@ -9,7 +9,6 @@
>   */
>  
>  #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
> -#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
>  
>  #ifdef CONFIG_CPU_FREQ
>  struct cpufreq_policy;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 2d170b5da0e3..023aa42aaac7 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
>  		 * For !fair tasks do:
>  		 *
>  		update_cfs_rq_load_avg(now, cfs_rq);
> -		attach_entity_load_avg(cfs_rq, se, 0);
> +		attach_entity_load_avg(cfs_rq, se);
>  		switched_from_fair(rq, p);
>  		 *
>  		 * such that the next switched_to_fair() has the
> @@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
>  {
>  	struct rq *rq = rq_of(cfs_rq);
>  
> -	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
> +	if (&rq->cfs == cfs_rq) {
>  		/*
>  		 * There are a few boundary cases this might miss but it should
>  		 * get called often enough that that should (hopefully) not be
> @@ -3520,7 +3520,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
>   * Must call update_cfs_rq_load_avg() before this, since we rely on
>   * cfs_rq->avg.last_update_time being current.
>   */
> -static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> +static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
>  {
>  	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
>  
> @@ -3556,7 +3556,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>  
>  	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
>  
> -	cfs_rq_util_change(cfs_rq, flags);
> +	cfs_rq_util_change(cfs_rq, 0);
>  
>  	trace_pelt_cfs_tp(cfs_rq);
>  }
> @@ -3614,7 +3614,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>  		 *
>  		 * IOW we're enqueueing a task on a new CPU.
>  		 */
> -		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
> +		attach_entity_load_avg(cfs_rq, se);
>  		update_tg_load_avg(cfs_rq, 0);
>  
>  	} else if (decayed) {
> @@ -3871,7 +3871,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>  static inline void remove_entity_load_avg(struct sched_entity *se) {}
>  
>  static inline void
> -attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
> +attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
>  static inline void
>  detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
>  
> @@ -10439,7 +10439,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
>  
>  	/* Synchronize entity with its cfs_rq */
>  	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
> -	attach_entity_load_avg(cfs_rq, se, 0);
> +	attach_entity_load_avg(cfs_rq, se);
>  	update_tg_load_avg(cfs_rq, false);
>  	propagate_entity_cfs_rq(se);
>  }
> 






* Re: [PATCH] sched/fair: remove redundant call to cpufreq_update_util
  2020-01-15 10:20 [PATCH] sched/fair: remove redundant call to cpufreq_update_util Vincent Guittot
  2020-01-15 10:28 ` Rafael J. Wysocki
@ 2020-01-15 15:35 ` Peter Zijlstra
  2020-01-15 20:20 ` Dietmar Eggemann
  2020-01-17 10:08 ` [tip: sched/core] sched/fair: Remove redundant call to cpufreq_update_util() tip-bot2 for Vincent Guittot
  3 siblings, 0 replies; 6+ messages in thread
From: Peter Zijlstra @ 2020-01-15 15:35 UTC (permalink / raw)
  To: Vincent Guittot
  Cc: rjw, viresh.kumar, mingo, juri.lelli, dietmar.eggemann, rostedt,
	bsegall, mgorman, linux-kernel, linux-pm

On Wed, Jan 15, 2020 at 11:20:20AM +0100, Vincent Guittot wrote:
> With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")
> update_load_avg() has become the central point for calling cpufreq (not
> including the update of blocked load). This change helps to simplify
> further the number of call to cpufreq_update_util() and to remove last
> redundant ones. With update_load_avg(), we are now sure that
> cpufreq_update_util() will be called after every task attachment to a
> cfs_rq and especially after propagating this event down to the util_avg of
> the root cfs_rq, which is the level that is used by cpufreq governors like
> schedutil to set the frequency of a CPU.
> 
> The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when the
> migration happens in a cgroup whereas util_avg of root cfs_rq is not yet
> updated and this call is duplicated with the one that happens immediately
> after when the migration event reaches the root cfs_rq. The dedicated flag
> SCHED_CPUFREQ_MIGRATION is now useless and can be removed. The interface of
> attach_entity_load_avg() can also be simplified accordingly.
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

Thanks!


* Re: [PATCH] sched/fair: remove redundant call to cpufreq_update_util
  2020-01-15 10:20 [PATCH] sched/fair: remove redundant call to cpufreq_update_util Vincent Guittot
  2020-01-15 10:28 ` Rafael J. Wysocki
  2020-01-15 15:35 ` Peter Zijlstra
@ 2020-01-15 20:20 ` Dietmar Eggemann
  2020-01-16  7:52   ` Vincent Guittot
  2020-01-17 10:08 ` [tip: sched/core] sched/fair: Remove redundant call to cpufreq_update_util() tip-bot2 for Vincent Guittot
  3 siblings, 1 reply; 6+ messages in thread
From: Dietmar Eggemann @ 2020-01-15 20:20 UTC (permalink / raw)
  To: Vincent Guittot, rjw, viresh.kumar, mingo, peterz, juri.lelli,
	rostedt, bsegall, mgorman, linux-kernel, linux-pm

On 15/01/2020 11:20, Vincent Guittot wrote:
> With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")
> update_load_avg() has become the central point for calling cpufreq (not
> including the update of blocked load). This change helps to simplify
> further the number of call to cpufreq_update_util() and to remove last
> redundant ones. With update_load_avg(), we are now sure that
> cpufreq_update_util() will be called after every task attachment to a
> cfs_rq and especially after propagating this event down to the util_avg of
> the root cfs_rq, which is the level that is used by cpufreq governors like
> schedutil to set the frequency of a CPU.
> 
> The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when the
> migration happens in a cgroup whereas util_avg of root cfs_rq is not yet
> updated and this call is duplicated with the one that happens immediately
> after when the migration event reaches the root cfs_rq. The dedicated flag
> SCHED_CPUFREQ_MIGRATION is now useless and can be removed. The interface of
> attach_entity_load_avg() can also be simplified accordingly.
> 
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

LGTM. Doesn't this allow us to get rid of the 'int flags' in
cfs_rq_util_change() as well?

8<---

 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 328d59e8afba..f82f4fde0cd3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3110,7 +3110,7 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
@@ -3129,7 +3129,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq, flags);
+		cpufreq_update_util(rq, 0);
 	}
 }
 
@@ -3556,7 +3556,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, 0);
+	cfs_rq_util_change(cfs_rq);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3577,7 +3577,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, 0);
+	cfs_rq_util_change(cfs_rq);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3618,7 +3618,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed) {
-		cfs_rq_util_change(cfs_rq, 0);
+		cfs_rq_util_change(cfs_rq);
 
 		if (flags & UPDATE_TG)
 			update_tg_load_avg(cfs_rq, 0);
@@ -3851,7 +3851,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq, 0);
+	cfs_rq_util_change(cfs_rq);
 }
 
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
-- 
2.17.1


* Re: [PATCH] sched/fair: remove redundant call to cpufreq_update_util
  2020-01-15 20:20 ` Dietmar Eggemann
@ 2020-01-16  7:52   ` Vincent Guittot
  0 siblings, 0 replies; 6+ messages in thread
From: Vincent Guittot @ 2020-01-16  7:52 UTC (permalink / raw)
  To: Dietmar Eggemann
  Cc: Rafael J. Wysocki, viresh kumar, Ingo Molnar, Peter Zijlstra,
	Juri Lelli, Steven Rostedt, Ben Segall, Mel Gorman, linux-kernel,
	open list:THERMAL

On Wed, 15 Jan 2020 at 21:20, Dietmar Eggemann <dietmar.eggemann@arm.com> wrote:
>
> On 15/01/2020 11:20, Vincent Guittot wrote:
> > With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")
> > update_load_avg() has become the central point for calling cpufreq (not
> > including the update of blocked load). This change helps to simplify
> > further the number of call to cpufreq_update_util() and to remove last
> > redundant ones. With update_load_avg(), we are now sure that
> > cpufreq_update_util() will be called after every task attachment to a
> > cfs_rq and especially after propagating this event down to the util_avg of
> > the root cfs_rq, which is the level that is used by cpufreq governors like
> > schedutil to set the frequency of a CPU.
> >
> > The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when the
> > migration happens in a cgroup whereas util_avg of root cfs_rq is not yet
> > updated and this call is duplicated with the one that happens immediately
> > after when the migration event reaches the root cfs_rq. The dedicated flag
> > SCHED_CPUFREQ_MIGRATION is now useless and can be removed. The interface of
> > attach_entity_load_avg() can also be simplified accordingly.
> >
> > Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
>
> LGTM. Doesn't this allow us to get rid of the 'int flags' in
> cfs_rq_util_change() as well?

I asked myself the same question but decided to keep the same
interface as cpufreq_update_util().
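
For reference, cpufreq_update_util() itself keeps its flags argument
(SCHED_CPUFREQ_IOWAIT is the only flag left after this patch); it looks
roughly like this in kernel/sched/sched.h at this time (simplified):

	static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	{
		struct update_util_data *data;

		/* Per-CPU hook installed by the cpufreq governor, if any. */
		data = rcu_dereference_sched(READ_ONCE(per_cpu(cpufreq_update_util_data,
							       cpu_of(rq))));
		if (data)
			data->func(data, rq_clock(rq), flags);
	}

Keeping an 'int flags' parameter in cfs_rq_util_change() mirrors that
interface.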

>
> 8<---
>
>  kernel/sched/fair.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 328d59e8afba..f82f4fde0cd3 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3110,7 +3110,7 @@ static inline void update_cfs_group(struct sched_entity *se)
>  }
>  #endif /* CONFIG_FAIR_GROUP_SCHED */
>
> -static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
> +static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
>  {
>         struct rq *rq = rq_of(cfs_rq);
>
> @@ -3129,7 +3129,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
>                  *
>                  * See cpu_util().
>                  */
> -               cpufreq_update_util(rq, flags);
> +               cpufreq_update_util(rq, 0);
>         }
>  }
>
> @@ -3556,7 +3556,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>
>         add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
>
> -       cfs_rq_util_change(cfs_rq, 0);
> +       cfs_rq_util_change(cfs_rq);
>
>         trace_pelt_cfs_tp(cfs_rq);
>  }
> @@ -3577,7 +3577,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>
>         add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
>
> -       cfs_rq_util_change(cfs_rq, 0);
> +       cfs_rq_util_change(cfs_rq);
>
>         trace_pelt_cfs_tp(cfs_rq);
>  }
> @@ -3618,7 +3618,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>                 update_tg_load_avg(cfs_rq, 0);
>
>         } else if (decayed) {
> -               cfs_rq_util_change(cfs_rq, 0);
> +               cfs_rq_util_change(cfs_rq);
>
>                 if (flags & UPDATE_TG)
>                         update_tg_load_avg(cfs_rq, 0);
> @@ -3851,7 +3851,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
>
>  static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
>  {
> -       cfs_rq_util_change(cfs_rq, 0);
> +       cfs_rq_util_change(cfs_rq);
>  }
>
>  static inline void remove_entity_load_avg(struct sched_entity *se) {}
> --
> 2.17.1


* [tip: sched/core] sched/fair: Remove redundant call to cpufreq_update_util()
  2020-01-15 10:20 [PATCH] sched/fair: remove redundant call to cpufreq_update_util Vincent Guittot
                   ` (2 preceding siblings ...)
  2020-01-15 20:20 ` Dietmar Eggemann
@ 2020-01-17 10:08 ` tip-bot2 for Vincent Guittot
  3 siblings, 0 replies; 6+ messages in thread
From: tip-bot2 for Vincent Guittot @ 2020-01-17 10:08 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Vincent Guittot, Peter Zijlstra (Intel), Rafael J. Wysocki, x86, LKML

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     a4f9a0e51bbf89cb461b1985a1a570e6b87da3b5
Gitweb:        https://git.kernel.org/tip/a4f9a0e51bbf89cb461b1985a1a570e6b87da3b5
Author:        Vincent Guittot <vincent.guittot@linaro.org>
AuthorDate:    Wed, 15 Jan 2020 11:20:20 +01:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 17 Jan 2020 10:19:22 +01:00

sched/fair: Remove redundant call to cpufreq_update_util()

With commit

  bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change() call to cpufreq_update_util()")

update_load_avg() has become the central point for calling cpufreq
(not including the update of blocked load). This change helps to
simplify further the number of calls to cpufreq_update_util() and to
remove last redundant ones. With update_load_avg(), we are now sure
that cpufreq_update_util() will be called after every task attachment
to a cfs_rq and especially after propagating this event down to the
util_avg of the root cfs_rq, which is the level that is used by
cpufreq governors like schedutil to set the frequency of a CPU.

The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when
the migration happens in a cgroup whereas util_avg of root cfs_rq is
not yet updated and this call is duplicated with the one that happens
immediately after when the migration event reaches the root cfs_rq.
The dedicated flag SCHED_CPUFREQ_MIGRATION is now useless and can be
removed. The interface of attach_entity_load_avg() can also be
simplified accordingly.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lkml.kernel.org/r/1579083620-24943-1-git-send-email-vincent.guittot@linaro.org
---
 include/linux/sched/cpufreq.h |  1 -
 kernel/sched/fair.c           | 14 +++++++-------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index cc6bcc1..3ed5aa1 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,7 +9,6 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
-#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e84723c..ebf5095 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 		 * For !fair tasks do:
 		 *
 		update_cfs_rq_load_avg(now, cfs_rq);
-		attach_entity_load_avg(cfs_rq, se, 0);
+		attach_entity_load_avg(cfs_rq, se);
 		switched_from_fair(rq, p);
 		 *
 		 * such that the next switched_to_fair() has the
@@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
+	if (&rq->cfs == cfs_rq) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3521,7 +3521,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3557,7 +3557,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, flags);
+	cfs_rq_util_change(cfs_rq, 0);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3615,7 +3615,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 *
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
-		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
+		attach_entity_load_avg(cfs_rq, se);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed) {
@@ -3872,7 +3872,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -10436,7 +10436,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se, 0);
+	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }

