* [PATCH v3] sched/fair: Avoid stale CPU util_est value for schedutil in task dequeue
@ 2020-12-18  9:27 Xuewen Yan
From: Xuewen Yan @ 2020-12-18  9:27 UTC
  To: dietmar.eggemann, vincent.guittot, juri.lelli, peterz, mingo
  Cc: rostedt, bsegall, mgorman, bristot, linux-kernel,
	patrick.bellasi, zhang.lyra, Ke.Wang, xuewyan, Xuewen.Yan

From: Xuewen Yan <xuewen.yan@unisoc.com>

CPU (root cfs_rq) estimated utilization (util_est) is currently used in
dequeue_task_fair() to drive frequency selection before it is updated.

with:

CPU_util        : rq->cfs.avg.util_avg
CPU_util_est    : rq->cfs.avg.util_est
CPU_utilization : max(CPU_util, CPU_util_est)
task_util       : p->se.avg.util_avg
task_util_est   : p->se.avg.util_est
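
For reference, cpu_util_cfs() derives CPU_utilization roughly as follows
(simplified sketch of the v5.10-era helper in kernel/sched/sched.h, shown
here for illustration only; the in-tree code may differ in detail):

    static inline unsigned long cpu_util_cfs(struct rq *rq)
    {
            unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

            /* CPU_utilization = max(CPU_util, CPU_util_est) */
            if (sched_feat(UTIL_EST))
                    util = max_t(unsigned long, util,
                                 READ_ONCE(rq->cfs.avg.util_est.enqueued));

            return util;
    }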

dequeue_task_fair():

    /* (1) CPU_util and task_util update + inform schedutil about
           CPU_utilization changes */
    for_each_sched_entity() /* 2 loops */
        (dequeue_entity() ->) update_load_avg() -> cfs_rq_util_change()
         -> cpufreq_update_util() -> ... -> sugov_update_[shared|single]
         -> sugov_get_util() -> cpu_util_cfs()

    /* (2) CPU_util_est and task_util_est update */
    util_est_dequeue()

cpu_util_cfs() uses CPU_utilization, which can lead to a stale (too
high) utilization value for schedutil in task ramp-down or ramp-up
scenarios during task dequeue.

To mitigate the issue, split the util_est update (2) into:

 (A) CPU_util_est update in util_est_dequeue()
 (B) task_util_est update in util_est_update()

Place (A) before (1) and keep (B) where (2) is. The latter is necessary
since (B) relies on the task_util update in (1).
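
The resulting order of operations in dequeue_task_fair() then looks like
this (illustrative outline only, mirroring the hunks below):

    dequeue_task_fair():

        util_est_dequeue()        /* (A) CPU_util_est no longer counts p */

        for_each_sched_entity()   /* (1) update CPU_util/task_util; schedutil
                                         now sees a CPU_utilization without
                                         the dequeued task */
            dequeue_entity() -> update_load_avg() -> ...

        util_est_update()         /* (B) task_util_est update, using the
                                         task_util freshly updated in (1) */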

Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
Changes since v2:
-modify the comment
-move util_est_dequeue above within_margin()
-modify the tab and space

Changes since v1:
-change the util_est_dequeue/update to inline type
-use unsigned int enqueued rather than util_est in util_est_dequeue
-remove "cpu" var

---
 kernel/sched/fair.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ae7ceba..f3a1b7a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3932,6 +3932,22 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	trace_sched_util_est_cfs_tp(cfs_rq);
 }
 
+static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
+				    struct task_struct *p)
+{
+	unsigned int enqueued;
+
+	if (!sched_feat(UTIL_EST))
+		return;
+
+	/* Update root cfs_rq's estimated utilization */
+	enqueued  = cfs_rq->avg.util_est.enqueued;
+	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
+	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+	trace_sched_util_est_cfs_tp(cfs_rq);
+}
+
 /*
  * Check if a (signed) value is within a specified (unsigned) margin,
  * based on the observation that:
@@ -3945,23 +3961,16 @@ static inline bool within_margin(int value, int margin)
 	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
 }
 
-static void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+static inline void util_est_update(struct cfs_rq *cfs_rq,
+				   struct task_struct *p,
+				   bool task_sleep)
 {
 	long last_ewma_diff;
 	struct util_est ue;
-	int cpu;
 
 	if (!sched_feat(UTIL_EST))
 		return;
 
-	/* Update root cfs_rq's estimated utilization */
-	ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
-	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
-
-	trace_sched_util_est_cfs_tp(cfs_rq);
-
 	/*
 	 * Skip update of task's estimated utilization when the task has not
 	 * yet completed an activation, e.g. being migrated.
@@ -4001,8 +4010,7 @@ static inline bool within_margin(int value, int margin)
 	 * To avoid overestimation of actual task utilization, skip updates if
 	 * we cannot grant there is idle time in this CPU.
 	 */
-	cpu = cpu_of(rq_of(cfs_rq));
-	if (task_util(p) > capacity_orig_of(cpu))
+	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
 		return;
 
 	/*
@@ -4085,8 +4093,11 @@ static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
 static inline void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
-		 bool task_sleep) {}
+util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+static inline void
+util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
+		bool task_sleep) {}
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 
 #endif /* CONFIG_SMP */
@@ -5589,6 +5600,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
 
+	util_est_dequeue(&rq->cfs, p);
+
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
@@ -5639,7 +5652,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		rq->next_balance = jiffies;
 
 dequeue_throttle:
-	util_est_dequeue(&rq->cfs, p, task_sleep);
+	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }
 
-- 
1.9.1
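
One detail worth spelling out from the diff context above: within_margin()
folds the signed range check -margin < value < margin into a single unsigned
comparison. A minimal standalone sketch (not part of the patch) that checks
the equivalence:

    #include <assert.h>
    #include <stdbool.h>

    /* Same expression as the kernel helper: true iff -margin < value < margin. */
    static bool within_margin(int value, int margin)
    {
            return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
    }

    int main(void)
    {
            const int margin = 16;

            for (int v = -64; v <= 64; v++)
                    assert(within_margin(v, margin) == (v > -margin && v < margin));

            return 0;
    }

Shifting by margin - 1 maps the open interval (-margin, margin) onto
[0, 2*margin - 2]; every out-of-range value, including negatives after the
unsigned cast, lands at or above 2*margin - 1 and fails the single compare.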



* Re: [PATCH v3] sched/fair: Avoid stale CPU util_est value for schedutil in task dequeue
From: Vincent Guittot @ 2020-12-18 13:12 UTC
  To: Xuewen Yan
  Cc: Dietmar Eggemann, Juri Lelli, Peter Zijlstra, Ingo Molnar,
	Steven Rostedt, Ben Segall, Mel Gorman,
	Daniel Bristot de Oliveira, linux-kernel, Patrick Bellasi,
	Chunyan Zhang, 王科 (Ke Wang),
	Ryan Y, Xuewen Yan

On Fri, 18 Dec 2020 at 10:28, Xuewen Yan <xuewen.yan94@gmail.com> wrote:
>
> From: Xuewen Yan <xuewen.yan@unisoc.com>
>
> [...]
>
> To mitigate the issue, split the util_est update (2) into:
>
>  (A) CPU_util_est update in util_est_dequeue()
>  (B) task_util_est update in util_est_update()
>
> Place (A) before (1) and keep (B) where (2) is. The latter is necessary
> since (B) relies on the task_util update in (1).
>

maybe add a
Fixes: 7f65ea42eb00 ("sched/fair: Add util_est on top of PELT")

> Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> [...]


* Re: [PATCH v3] sched/fair: Avoid stale CPU util_est value for schedutil in task dequeue
From: Peter Zijlstra @ 2020-12-18 14:35 UTC
  To: Vincent Guittot
  Cc: Xuewen Yan, Dietmar Eggemann, Juri Lelli, Ingo Molnar,
	Steven Rostedt, Ben Segall, Mel Gorman,
	Daniel Bristot de Oliveira, linux-kernel, Patrick Bellasi,
	Chunyan Zhang, 王科 (Ke Wang),
	Ryan Y, Xuewen Yan

On Fri, Dec 18, 2020 at 02:12:50PM +0100, Vincent Guittot wrote:
> On Fri, 18 Dec 2020 at 10:28, Xuewen Yan <xuewen.yan94@gmail.com> wrote:
> >
> > [...]
> 
> maybe add a
> Fixes: 7f65ea42eb00 ("sched/fair: Add util_est on top of PELT")
> 
> > Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
> > Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
> 
> Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

Thanks!


* [tip: sched/core] sched/fair: Avoid stale CPU util_est value for schedutil in task dequeue
From: tip-bot2 for Xuewen Yan @ 2021-01-14 11:29 UTC
  To: linux-tip-commits
  Cc: Xuewen Yan, Peter Zijlstra (Intel),
	Dietmar Eggemann, Vincent Guittot, x86, linux-kernel

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     8c1f560c1ea3f19e22ba356f62680d9d449c9ec2
Gitweb:        https://git.kernel.org/tip/8c1f560c1ea3f19e22ba356f62680d9d449c9ec2
Author:        Xuewen Yan <xuewen.yan@unisoc.com>
AuthorDate:    Fri, 18 Dec 2020 17:27:52 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 14 Jan 2021 11:20:10 +01:00

sched/fair: Avoid stale CPU util_est value for schedutil in task dequeue

CPU (root cfs_rq) estimated utilization (util_est) is currently used in
dequeue_task_fair() to drive frequency selection before it is updated.

with:

CPU_util        : rq->cfs.avg.util_avg
CPU_util_est    : rq->cfs.avg.util_est
CPU_utilization : max(CPU_util, CPU_util_est)
task_util       : p->se.avg.util_avg
task_util_est   : p->se.avg.util_est

dequeue_task_fair():

    /* (1) CPU_util and task_util update + inform schedutil about
           CPU_utilization changes */
    for_each_sched_entity() /* 2 loops */
        (dequeue_entity() ->) update_load_avg() -> cfs_rq_util_change()
         -> cpufreq_update_util() -> ... -> sugov_update_[shared|single]
         -> sugov_get_util() -> cpu_util_cfs()

    /* (2) CPU_util_est and task_util_est update */
    util_est_dequeue()

cpu_util_cfs() uses CPU_utilization, which can lead to a stale (too
high) utilization value for schedutil in task ramp-down or ramp-up
scenarios during task dequeue.

To mitigate the issue, split the util_est update (2) into:

 (A) CPU_util_est update in util_est_dequeue()
 (B) task_util_est update in util_est_update()

Place (A) before (1) and keep (B) where (2) is. The latter is necessary
since (B) relies on the task_util update in (1).

Fixes: 7f65ea42eb00 ("sched/fair: Add util_est on top of PELT")
Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/1608283672-18240-1-git-send-email-xuewen.yan94@gmail.com
---
 kernel/sched/fair.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 389cb58..40d3ebf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3943,6 +3943,22 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	trace_sched_util_est_cfs_tp(cfs_rq);
 }
 
+static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
+				    struct task_struct *p)
+{
+	unsigned int enqueued;
+
+	if (!sched_feat(UTIL_EST))
+		return;
+
+	/* Update root cfs_rq's estimated utilization */
+	enqueued  = cfs_rq->avg.util_est.enqueued;
+	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
+	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+	trace_sched_util_est_cfs_tp(cfs_rq);
+}
+
 /*
  * Check if a (signed) value is within a specified (unsigned) margin,
  * based on the observation that:
@@ -3956,23 +3972,16 @@ static inline bool within_margin(int value, int margin)
 	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
 }
 
-static void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+static inline void util_est_update(struct cfs_rq *cfs_rq,
+				   struct task_struct *p,
+				   bool task_sleep)
 {
 	long last_ewma_diff;
 	struct util_est ue;
-	int cpu;
 
 	if (!sched_feat(UTIL_EST))
 		return;
 
-	/* Update root cfs_rq's estimated utilization */
-	ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
-	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
-
-	trace_sched_util_est_cfs_tp(cfs_rq);
-
 	/*
 	 * Skip update of task's estimated utilization when the task has not
 	 * yet completed an activation, e.g. being migrated.
@@ -4012,8 +4021,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	 * To avoid overestimation of actual task utilization, skip updates if
 	 * we cannot grant there is idle time in this CPU.
 	 */
-	cpu = cpu_of(rq_of(cfs_rq));
-	if (task_util(p) > capacity_orig_of(cpu))
+	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
 		return;
 
 	/*
@@ -4096,8 +4104,11 @@ static inline void
 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
 static inline void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
-		 bool task_sleep) {}
+util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+static inline void
+util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
+		bool task_sleep) {}
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 
 #endif /* CONFIG_SMP */
@@ -5609,6 +5620,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
 
+	util_est_dequeue(&rq->cfs, p);
+
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
@@ -5659,7 +5672,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		rq->next_balance = jiffies;
 
 dequeue_throttle:
-	util_est_dequeue(&rq->cfs, p, task_sleep);
+	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }
 

