* [PATCH v2] sched/fair: combine detach into dequeue when migrating task
From: Chengming Zhou @ 2022-06-09  3:53 UTC
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, bristot, vschneid
  Cc: linux-kernel, duanxiongchun, songmuchun, Chengming Zhou

When we are migrating a task out of the CPU, we can combine the detach
into dequeue_entity(), saving the separate detach_entity_cfs_rq() call
in migrate_task_rq_fair().

This optimization mirrors the DO_ATTACH handling in enqueue_entity()
when migrating a task to the CPU.
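
For reference, the DO_ATTACH path in update_load_avg() that this
mirrors looks roughly like the following in the pre-patch code (a
sketch; exact details may differ between kernel versions):

	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
		/*
		 * DO_ATTACH means we're here from enqueue_entity();
		 * !last_update_time means the task went through
		 * migrate_task_rq_fair(), i.e. we are enqueueing a
		 * migrated task on its new CPU.
		 */
		attach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	}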

So we don't have to traverse the CFS tree twice to do the load
detach and propagation.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
v2:
 - fix !CONFIG_SMP build error
---
 kernel/sched/fair.c | 34 +++++++++++++++++++++++-----------
 1 file changed, 23 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e0cd4052e32f..9052685db375 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3896,6 +3896,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 #define UPDATE_TG	0x1
 #define SKIP_AGE_LOAD	0x2
 #define DO_ATTACH	0x4
+#define DO_DETACH	0x8
 
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -3913,7 +3914,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
-	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
+	if (flags & DO_DETACH) {
+		/*
+		 * DO_DETACH means we're here from dequeue_entity()
+		 * and we are migrating task out of the CPU.
+		 */
+		detach_entity_load_avg(cfs_rq, se);
+		update_tg_load_avg(cfs_rq);
+	} else if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
 		 * DO_ATTACH means we're here from enqueue_entity().
@@ -4206,6 +4214,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 #define UPDATE_TG	0x0
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
+#define DO_DETACH	0x0
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
@@ -4426,6 +4435,14 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	int action = UPDATE_TG;
+
+	/*
+	 * If we are migrating task from the CPU, detach load_avg when dequeue.
+	 */
+	if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)
+		action |= DO_DETACH;
+
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
@@ -4440,7 +4457,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
 	 */
-	update_load_avg(cfs_rq, se, UPDATE_TG);
+	update_load_avg(cfs_rq, se, action);
 	se_update_runnable(se);
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
@@ -6940,15 +6957,10 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 		se->vruntime -= min_vruntime;
 	}
 
-	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
-		/*
-		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
-		 * rq->lock and can modify state directly.
-		 */
-		lockdep_assert_rq_held(task_rq(p));
-		detach_entity_cfs_rq(&p->se);
-
-	} else {
+	/*
+	 * In case of TASK_ON_RQ_MIGRATING we already detach in dequeue_entity.
+	 */
+	if (p->on_rq != TASK_ON_RQ_MIGRATING) {
 		/*
 		 * We are supposed to update the task to "current" time, then
 		 * its up to date and ready to go to new CPU/cfs_rq. But we
-- 
2.36.1



* Re: [PATCH v2] sched/fair: combine detach into dequeue when migrating task
From: Dietmar Eggemann @ 2022-06-10 10:49 UTC
  To: Chengming Zhou, mingo, peterz, juri.lelli, vincent.guittot,
	rostedt, bsegall, mgorman, bristot, vschneid
  Cc: linux-kernel, duanxiongchun, songmuchun

On 09/06/2022 05:53, Chengming Zhou wrote:
> When we are migrating a task out of the CPU, we can combine the detach
> into dequeue_entity(), saving the separate detach_entity_cfs_rq() call
> in migrate_task_rq_fair().
> 
> This optimization mirrors the DO_ATTACH handling in enqueue_entity()
> when migrating a task to the CPU.
> 
> So we don't have to traverse the CFS tree twice to do the load
> detach and propagation.

By `propagation` do you refer to the detach_entity_cfs_rq() ->
propagate_entity_cfs_rq() call?
This one wouldn't be called anymore with your change.
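
For context, detach_entity_cfs_rq() in kernel/sched/fair.c is roughly
the following at this point (shown only to illustrate which calls are
being folded into the dequeue path and which one is saved):

	/* Sketch of the mainline function around this time; details may vary. */
	static void detach_entity_cfs_rq(struct sched_entity *se)
	{
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		/* Catch up with the cfs_rq and remove our load when we leave */
		update_load_avg(cfs_rq, se, 0);
		detach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
		propagate_entity_cfs_rq(se);
	}

The detach_entity_load_avg()/update_tg_load_avg() pair moves into the
new DO_DETACH branch of update_load_avg(), while the trailing
propagate_entity_cfs_rq() walk is the part that goes away.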

[...]

> @@ -4426,6 +4435,14 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
>  static void
>  dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>  {
> +	int action = UPDATE_TG;
> +
> +	/*
> +	 * If we are migrating task from the CPU, detach load_avg when dequeue.
> +	 */
> +	if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)

- if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)
+ if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))

> +		action |= DO_DETACH;

With the `entity_is_task(se)` check you make sure we only call
detach_entity_load_avg() and update_tg_load_avg() for the se
representing the task itself (and not the task groups it might run in).
So IMHO this looks good.
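
For reference, task_on_rq_migrating() is (roughly) the following helper
in kernel/sched/sched.h; besides being shorter, it reads p->on_rq with
READ_ONCE():

	static inline int task_on_rq_migrating(struct task_struct *p)
	{
		return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
	}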

You save the propagate_entity_cfs_rq(&p->se) call from (2) by doing the
detach_entity_load_avg() and update_tg_load_avg() for a migrating task
inside (1) (the task being the first se in the loop).

detach_task()
  deactivate_task()
    dequeue_task_fair()
      for_each_sched_entity(se)
        dequeue_entity()
          update_load_avg() /* (1) */

  set_task_cpu()
    migrate_task_rq_fair()
      /* called detach_entity_cfs_rq() before the patch (2) */

[...]

> @@ -6940,15 +6957,10 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
>  		se->vruntime -= min_vruntime;
>  	}
>  
> -	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
> -		/*
> -		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
> -		 * rq->lock and can modify state directly.
> -		 */
> -		lockdep_assert_rq_held(task_rq(p));
> -		detach_entity_cfs_rq(&p->se);
> -
> -	} else {
> +	/*
> +	 * In case of TASK_ON_RQ_MIGRATING we already detach in dequeue_entity.
> +	 */
> +	if (p->on_rq != TASK_ON_RQ_MIGRATING) {

- if (p->on_rq != TASK_ON_RQ_MIGRATING) {
+ if (!task_on_rq_migrating(p)) {

[...]


* Re: [External] Re: [PATCH v2] sched/fair: combine detach into dequeue when migrating task
From: Chengming Zhou @ 2022-06-10 10:55 UTC
  To: Dietmar Eggemann, mingo, peterz, juri.lelli, vincent.guittot,
	rostedt, bsegall, mgorman, bristot, vschneid
  Cc: linux-kernel, duanxiongchun, songmuchun

Hi,

On 2022/6/10 18:49, Dietmar Eggemann wrote:
> On 09/06/2022 05:53, Chengming Zhou wrote:
>> When we are migrating a task out of the CPU, we can combine the detach
>> into dequeue_entity(), saving the separate detach_entity_cfs_rq() call
>> in migrate_task_rq_fair().
>>
>> This optimization mirrors the DO_ATTACH handling in enqueue_entity()
>> when migrating a task to the CPU.
>>
>> So we don't have to traverse the CFS tree twice to do the load
>> detach and propagation.
> 
> By `propagation` do you refer to the detach_entity_cfs_rq() ->
> propagate_entity_cfs_rq() call?
> This one wouldn't be called anymore with your change.

Yes.

> 
> [...]
> 
>> @@ -4426,6 +4435,14 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
>>  static void
>>  dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>>  {
>> +	int action = UPDATE_TG;
>> +
>> +	/*
>> +	 * If we are migrating task from the CPU, detach load_avg when dequeue.
>> +	 */
>> +	if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)
> 
> - if (entity_is_task(se) && task_of(se)->on_rq == TASK_ON_RQ_MIGRATING)
> + if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))

Better, will do.

> 
>> +		action |= DO_DETACH;
> 
> With the `entity_is_task(se)` check you make sure we only call
> detach_entity_load_avg() and update_tg_load_avg() for the se
> representing the task itself (and not the task groups it might run in).
> So IMHO this looks good.
> 
> You save the propagate_entity_cfs_rq(&p->se) call from (2) by doing the
> detach_entity_load_avg() and update_tg_load_avg() for a migrating task
> inside (1) (the task being the first se in the loop).
> 
> detach_task()
>   deactivate_task()
>     dequeue_task_fair()
>       for_each_sched_entity(se)
>         dequeue_entity()
>           update_load_avg() /* (1) */
> 
>   set_task_cpu()
>     migrate_task_rq_fair()
>       /* called detach_entity_cfs_rq() before the patch (2) */

Thanks for the description, it's clearer; I should put it in the commit message.

> 
> [...]
> 
>> @@ -6940,15 +6957,10 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
>>  		se->vruntime -= min_vruntime;
>>  	}
>>  
>> -	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
>> -		/*
>> -		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
>> -		 * rq->lock and can modify state directly.
>> -		 */
>> -		lockdep_assert_rq_held(task_rq(p));
>> -		detach_entity_cfs_rq(&p->se);
>> -
>> -	} else {
>> +	/*
>> +	 * In case of TASK_ON_RQ_MIGRATING we already detach in dequeue_entity.
>> +	 */
>> +	if (p->on_rq != TASK_ON_RQ_MIGRATING) {
> 
> - if (p->on_rq != TASK_ON_RQ_MIGRATING) {
> + if (!task_on_rq_migrating(p)) {

Will do. Thanks!

> 
> [...]

