linux-kernel.vger.kernel.org archive mirror
* [PATCH v4] sched/fair: combine detach into dequeue when migrating task
@ 2022-06-20 13:36 Chengming Zhou
  2022-06-28 12:32 ` Chengming Zhou
  2022-06-28 15:26 ` Vincent Guittot
  0 siblings, 2 replies; 4+ messages in thread
From: Chengming Zhou @ 2022-06-20 13:36 UTC (permalink / raw)
  To: mingo, peterz, vincent.guittot, dietmar.eggemann, bsegall, vschneid
  Cc: linux-kernel, Chengming Zhou

When we migrate a task off its CPU, we can combine the detach and the
propagation into dequeue_entity(), which saves the
detach_entity_cfs_rq() -> propagate_entity_cfs_rq() call from
migrate_task_rq_fair().

This optimization mirrors the DO_ATTACH handling in enqueue_entity()
when a task migrates onto the CPU.

So we no longer have to traverse the CFS tree an extra time: the
detach_entity_cfs_rq() -> propagate_entity_cfs_rq() path is not
called anymore after this change.

Copied from Dietmar's much clearer comment:

detach_task()
  deactivate_task()
    dequeue_task_fair()
      for_each_sched_entity(se)
        dequeue_entity()
          update_load_avg() /* (1) */

  set_task_cpu()
    migrate_task_rq_fair()
      /* detach_entity_cfs_rq() was called here before this patch (2) */

This patch saves the propagate_entity_cfs_rq(&p->se) call from (2)
by doing the detach_entity_load_avg() and update_tg_load_avg() for
a migrating task inside (1) (the task being the first se in the loop).
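
For reference, this is the path that becomes unnecessary for a
migrating task; a simplified sketch of the pre-patch
detach_entity_cfs_rq() (details such as throttling are elided):

static void detach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/* Catch up with the cfs_rq and remove our load when we leave */
	update_load_avg(cfs_rq, se, 0);
	detach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
	propagate_entity_cfs_rq(se);	/* second walk up the se hierarchy */
}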

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
v4:
 - change the commit message a little.
 - remove the forward declaration of detach_entity_cfs_rq()
 - remove verbose comments in code.

v3:
 - change to use task_on_rq_migrating() and put Dietmar's much clearer
   description in the commit message. Thanks!

v2:
 - fix !CONFIG_SMP build error
---
 kernel/sched/fair.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8bed75757e65..31d53c11e244 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3931,6 +3931,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 #define UPDATE_TG	0x1
 #define SKIP_AGE_LOAD	0x2
 #define DO_ATTACH	0x4
+#define DO_DETACH	0x8
 
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -3948,7 +3949,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
-	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
+	if (flags & DO_DETACH) {
+		/*
+		 * DO_DETACH means we're here from dequeue_entity()
+		 * and we are migrating task out of the CPU.
+		 */
+		detach_entity_load_avg(cfs_rq, se);
+		update_tg_load_avg(cfs_rq);
+	} else if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
 		 * DO_ATTACH means we're here from enqueue_entity().
@@ -4241,6 +4249,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
+#define DO_DETACH	0x0
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
@@ -4460,6 +4469,11 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	int action = UPDATE_TG;
+
+	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
+		action |= DO_DETACH;
+
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
@@ -4473,7 +4487,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
 	 */
-	update_load_avg(cfs_rq, se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, action);
 	se_update_runnable(se);
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
@@ -6938,8 +6952,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 	return new_cpu;
 }
 
-static void detach_entity_cfs_rq(struct sched_entity *se);
-
 /*
  * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -6973,15 +6985,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 		se->vruntime -= min_vruntime;
 	}
 
-	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
-		/*
-		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
-		 * rq->lock and can modify state directly.
-		 */
-		lockdep_assert_rq_held(task_rq(p));
-		detach_entity_cfs_rq(&p->se);
-
-	} else {
+	if (!task_on_rq_migrating(p)) {
 		/*
 		 * We are supposed to update the task to "current" time, then
 		 * its up to date and ready to go to new CPU/cfs_rq. But we
-- 
2.36.1



* Re: [PATCH v4] sched/fair: combine detach into dequeue when migrating task
  2022-06-20 13:36 [PATCH v4] sched/fair: combine detach into dequeue when migrating task Chengming Zhou
@ 2022-06-28 12:32 ` Chengming Zhou
  2022-06-28 15:26 ` Vincent Guittot
  1 sibling, 0 replies; 4+ messages in thread
From: Chengming Zhou @ 2022-06-28 12:32 UTC (permalink / raw)
  To: mingo, peterz, vincent.guittot, dietmar.eggemann, bsegall, vschneid
  Cc: linux-kernel

Hello, friendly ping...

Thanks.

On 2022/6/20 21:36, Chengming Zhou wrote:
> [...]

* Re: [PATCH v4] sched/fair: combine detach into dequeue when migrating task
  2022-06-20 13:36 [PATCH v4] sched/fair: combine detach into dequeue when migrating task Chengming Zhou
  2022-06-28 12:32 ` Chengming Zhou
@ 2022-06-28 15:26 ` Vincent Guittot
  2022-06-29  1:37   ` [External] " Chengming Zhou
  1 sibling, 1 reply; 4+ messages in thread
From: Vincent Guittot @ 2022-06-28 15:26 UTC (permalink / raw)
  To: Chengming Zhou
  Cc: mingo, peterz, dietmar.eggemann, bsegall, vschneid, linux-kernel

On Mon, 20 Jun 2022 at 15:36, Chengming Zhou
<zhouchengming@bytedance.com> wrote:
>
> When we migrate a task off its CPU, we can combine the detach and the
> propagation into dequeue_entity(), which saves the
> detach_entity_cfs_rq() -> propagate_entity_cfs_rq() call from
> migrate_task_rq_fair().
>
> This optimization mirrors the DO_ATTACH handling in enqueue_entity()
> when a task migrates onto the CPU.
>
> So we no longer have to traverse the CFS tree an extra time: the
> detach_entity_cfs_rq() -> propagate_entity_cfs_rq() path is not
> called anymore after this change.
>
> Copied from Dietmar's much clearer comment:

Although it's nice to keep the author of a comment, it should not be
part of the commit message.

> detach_task()
>   deactivate_task()
>     dequeue_task_fair()
>       for_each_sched_entity(se)
>         dequeue_entity()
>           update_load_avg()   /* (1) */
              detach_entity_load_avg()

> set_task_cpu()
>  migrate_task_rq_fair()
>    detach_entity_cfs_rq() /* (2) */
       update_load_avg();
       detach_entity_load_avg();
       propagate_entity_cfs_rq();
         for_each_sched_entity()
           update_load_avg()

Could you add the full call stack so we can see the extra loop that
is saved more easily?
>
> This patch saves the propagate_entity_cfs_rq(&p->se) call from (2)
> by doing the detach_entity_load_avg() and update_tg_load_avg() for
> a migrating task inside (1) (the task being the first se in the loop).
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>

Apart from the small nit above:

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> [...]


* Re: [External] Re: [PATCH v4] sched/fair: combine detach into dequeue when migrating task
  2022-06-28 15:26 ` Vincent Guittot
@ 2022-06-29  1:37   ` Chengming Zhou
  0 siblings, 0 replies; 4+ messages in thread
From: Chengming Zhou @ 2022-06-29  1:37 UTC (permalink / raw)
  To: Vincent Guittot
  Cc: mingo, peterz, dietmar.eggemann, bsegall, vschneid, linux-kernel

Hi, thanks for your review.

On 2022/6/28 23:26, Vincent Guittot wrote:
> On Mon, 20 Jun 2022 at 15:36, Chengming Zhou
> <zhouchengming@bytedance.com> wrote:
>>
>> When we migrate a task off its CPU, we can combine the detach and the
>> propagation into dequeue_entity(), which saves the
>> detach_entity_cfs_rq() -> propagate_entity_cfs_rq() call from
>> migrate_task_rq_fair().
>>
>> This optimization mirrors the DO_ATTACH handling in enqueue_entity()
>> when a task migrates onto the CPU.
>>
>> So we no longer have to traverse the CFS tree an extra time: the
>> detach_entity_cfs_rq() -> propagate_entity_cfs_rq() path is not
>> called anymore after this change.
>>
>> Copied from Dietmar's much clearer comment:
> 
> Although it's nice to keep the author of a comment, it should not be
> part of the commit message.

Ok, I will delete this.

> 
>> detach_task()
>>   deactivate_task()
>>     dequeue_task_fair()
>>       for_each_sched_entity(se)
>>         dequeue_entity()
>>           update_load_avg()   /* (1) */
>               detach_entity_load_avg()
> 
>> set_task_cpu()
>>  migrate_task_rq_fair()
>>    detach_entity_cfs_rq() /* (2) */
>        update_load_avg();
>        detach_entity_load_avg();
>        propagate_entity_cfs_rq();
>          for_each_sched_entity()
>            update_load_avg()
> 
> Could you add the full call stack so we can see the extra loop that
> is saved more easily?

Good advice, I will add it.
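
Something like this, assembled from your annotations above (exact
inlining in mainline may differ slightly):

Before (two walks up the hierarchy):

detach_task()
  deactivate_task()
    dequeue_task_fair()
      for_each_sched_entity(se)
        dequeue_entity()
          update_load_avg()              /* (1) */
  set_task_cpu()
    migrate_task_rq_fair()
      detach_entity_cfs_rq()             /* (2) */
        update_load_avg()
        detach_entity_load_avg()
        update_tg_load_avg()
        propagate_entity_cfs_rq()
          for_each_sched_entity(se)      /* the extra loop saved */
            update_load_avg()

After (one walk):

detach_task()
  deactivate_task()
    dequeue_task_fair()
      for_each_sched_entity(se)
        dequeue_entity()
          update_load_avg()              /* (1) */
            detach_entity_load_avg()     /* DO_DETACH, task se only */
            update_tg_load_avg()
  set_task_cpu()
    migrate_task_rq_fair()               /* no detach_entity_cfs_rq() */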

Thanks.

>> [...]
