* [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
@ 2021-09-01  0:46 Monk Liu
  2021-09-01  0:46 ` [PATCH 2/2] drm/sched: serialize job_timeout and scheduler Monk Liu
  2021-09-01  6:50 ` [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Christian König
  0 siblings, 2 replies; 8+ messages in thread
From: Monk Liu @ 2021-09-01  0:46 UTC (permalink / raw)
  To: amd-gfx; +Cc: dri-devel, Monk Liu

issue:
in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
even though its corresponding job is still running.

fix:
do not cancel the timer in cleanup_job; instead do the cancelling
only when the head job has signaled, and if there is a "next" job,
start the timeout again.
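
For clarity, this is roughly how drm_sched_get_cleanup_job() reads with
the change applied; it is reconstructed from the diff below, and the
final unlock/return path (not shown in the diff) is assumed from the
surrounding code rather than quoted:

static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* the head job has finished: drop it and its TO timer */
		list_del_init(&job->list);
		cancel_delayed_work(&sched->work_tdr);

		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);
		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* re-arm the TO timer for the new head job */
			drm_sched_start_timeout(sched);
		}
	} else {
		/* head job is still running: leave its timer alone */
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

The net effect is that the timer is only ever cancelled for a job that
has actually signaled, and it is re-armed for the new head job, so a
still-running job can no longer lose its timeout protection.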

v2:
further clean up the logic, and cancel the TDR timer when the signaled
job is the last one in its scheduler.

v3:
reword the issue description,
remove the cancel_delayed_work at the beginning of cleanup_job,
restore the implementation of drm_sched_job_begin.

v4:
remove the kthread_should_park() check from the cleanup_job routine;
signaled jobs should be cleaned up as soon as possible.

TODO:
1) introduce scheduler pause/resume in job_timeout to serialize the
handling of the scheduler and job_timeout.
2) drop the bad job's removal and re-insertion in the scheduler; with
the serialization above there is no race left.

Tested-by: Jingwen Chen <jingwen.chen@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index a2a9536..3e0bbc7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_job *job, *next;
 
-	/*
-	 * Don't destroy jobs while the timeout worker is running  OR thread
-	 * is being parked and hence assumed to not touch pending_list
-	 */
-	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !cancel_delayed_work(&sched->work_tdr)) ||
-	    kthread_should_park())
-		return NULL;
-
 	spin_lock(&sched->job_list_lock);
 
 	job = list_first_entry_or_null(&sched->pending_list,
@@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from pending_list */
 		list_del_init(&job->list);
+
+		/* cancel this job's TO timer */
+		cancel_delayed_work(&sched->work_tdr);
 		/* make the scheduled timestamp more accurate */
 		next = list_first_entry_or_null(&sched->pending_list,
 						typeof(*next), list);
-		if (next)
+
+		if (next) {
 			next->s_fence->scheduled.timestamp =
 				job->s_fence->finished.timestamp;
-
+			/* start TO timer for next job */
+			drm_sched_start_timeout(sched);
+		}
 	} else {
 		job = NULL;
-		/* queue timeout for next job */
-		drm_sched_start_timeout(sched);
 	}
 
 	spin_unlock(&sched->job_list_lock);
@@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
 					  (entity = drm_sched_select_entity(sched))) ||
 					 kthread_should_stop());
 
-		if (cleanup_job) {
+		if (cleanup_job)
 			sched->ops->free_job(cleanup_job);
-			/* queue timeout for next job */
-			drm_sched_start_timeout(sched);
-		}
 
 		if (!entity)
 			continue;
-- 
2.7.4



* [PATCH 2/2] drm/sched: serialize job_timeout and scheduler
  2021-09-01  0:46 [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Monk Liu
@ 2021-09-01  0:46 ` Monk Liu
  2021-09-01  6:50 ` [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Christian König
  1 sibling, 0 replies; 8+ messages in thread
From: Monk Liu @ 2021-09-01  0:46 UTC (permalink / raw)
  To: amd-gfx; +Cc: dri-devel, Monk Liu, jingwen chen

Park the scheduler thread in drm_sched_job_timedout() before it touches
pending_list, so timeout handling is serialized against the scheduler
thread. With that serialization the bad job no longer needs to be
removed from and re-inserted into pending_list; the driver's
timedout_job callback is expected to call drm_sched_start(), which
unparks the thread again.

Tested-by: Jingwen Chen <jingwen.chen@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Jingwen Chen <jingwen.chen@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 24 ++++--------------------
 1 file changed, 4 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3e0bbc7..87d72e9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -319,19 +319,17 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+	if (!__kthread_should_park(sched->thread))
+		kthread_park(sched->thread);
+
 	spin_lock(&sched->job_list_lock);
 	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
 
 	if (job) {
-		/*
-		 * Remove the bad job so it cannot be freed by concurrent
-		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
-		 * is parked at which point it's safe.
-		 */
-		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
+		/* vendor's timeout_job should call drm_sched_start() */
 		status = job->sched->ops->timedout_job(job);
 
 		/*
@@ -393,20 +391,6 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	kthread_park(sched->thread);
 
 	/*
-	 * Reinsert back the bad job here - now it's safe as
-	 * drm_sched_get_cleanup_job cannot race against us and release the
-	 * bad job at this point - we parked (waited for) any in progress
-	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
-	 * now until the scheduler thread is unparked.
-	 */
-	if (bad && bad->sched == sched)
-		/*
-		 * Add at the head of the queue to reflect it was the earliest
-		 * job extracted.
-		 */
-		list_add(&bad->list, &sched->pending_list);
-
-	/*
 	 * Iterate the job list from later to  earlier one and either deactive
 	 * their HW callbacks or remove them from pending list if they already
 	 * signaled.
-- 
2.7.4
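
The new comment in drm_sched_job_timedout() assumes the driver's
timedout_job hook performs the stop/recover/start sequence itself and
ends with drm_sched_start(), which unparks the scheduler thread. A
rough sketch of such a handler is below; it is not part of this patch,
my_timedout_job() and my_driver_reset_hw() are placeholder names, and a
real driver (amdgpu, for example) does considerably more work here:

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	/* stop the scheduler and detach the done-callbacks of pending
	 * jobs; the core has already parked the scheduler thread */
	drm_sched_stop(sched, sched_job);

	/* driver-specific hardware recovery (placeholder) */
	my_driver_reset_hw(sched);

	/* push the pending jobs to the reset hardware again */
	drm_sched_resubmit_jobs(sched);

	/* restart the scheduler; this also unparks the thread, which is
	 * what drm_sched_job_timedout() now relies on */
	drm_sched_start(sched, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

With the thread parked by the core before the callback and unparked by
drm_sched_start() inside it, cleanup and timeout handling can no longer
race over pending_list, which is why the del/re-insert of the bad job
is dropped above.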



* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-01  0:46 [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Monk Liu
  2021-09-01  0:46 ` [PATCH 2/2] drm/sched: serialize job_timeout and scheduler Monk Liu
@ 2021-09-01  6:50 ` Christian König
  2021-09-01 20:42   ` Alex Deucher
  1 sibling, 1 reply; 8+ messages in thread
From: Christian König @ 2021-09-01  6:50 UTC (permalink / raw)
  To: Monk Liu, amd-gfx; +Cc: dri-devel

Am 01.09.21 um 02:46 schrieb Monk Liu:
> issue:
> in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
> even though its corresponding job is still running.
>
> fix:
> do not cancel the timer in cleanup_job; instead do the cancelling
> only when the head job has signaled, and if there is a "next" job,
> start the timeout again.
>
> v2:
> further clean up the logic, and cancel the TDR timer when the signaled
> job is the last one in its scheduler.
>
> v3:
> reword the issue description,
> remove the cancel_delayed_work at the beginning of cleanup_job,
> restore the implementation of drm_sched_job_begin.
>
> v4:
> remove the kthread_should_park() check from the cleanup_job routine;
> signaled jobs should be cleaned up as soon as possible.
>
> TODO:
> 1) introduce scheduler pause/resume in job_timeout to serialize the
> handling of the scheduler and job_timeout.
> 2) drop the bad job's removal and re-insertion in the scheduler; with
> the serialization above there is no race left.
>
> Tested-by: Jingwen Chen <jingwen.chen@amd.com>
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
>   1 file changed, 9 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index a2a9536..3e0bbc7 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>   {
>   	struct drm_sched_job *job, *next;
>   
> -	/*
> -	 * Don't destroy jobs while the timeout worker is running  OR thread
> -	 * is being parked and hence assumed to not touch pending_list
> -	 */
> -	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> -	    !cancel_delayed_work(&sched->work_tdr)) ||
> -	    kthread_should_park())
> -		return NULL;
> -
>   	spin_lock(&sched->job_list_lock);
>   
>   	job = list_first_entry_or_null(&sched->pending_list,
> @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>   	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
>   		/* remove job from pending_list */
>   		list_del_init(&job->list);
> +
> +		/* cancel this job's TO timer */
> +		cancel_delayed_work(&sched->work_tdr);
>   		/* make the scheduled timestamp more accurate */
>   		next = list_first_entry_or_null(&sched->pending_list,
>   						typeof(*next), list);
> -		if (next)
> +
> +		if (next) {
>   			next->s_fence->scheduled.timestamp =
>   				job->s_fence->finished.timestamp;
> -
> +			/* start TO timer for next job */
> +			drm_sched_start_timeout(sched);
> +		}
>   	} else {
>   		job = NULL;
> -		/* queue timeout for next job */
> -		drm_sched_start_timeout(sched);
>   	}
>   
>   	spin_unlock(&sched->job_list_lock);
> @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
>   					  (entity = drm_sched_select_entity(sched))) ||
>   					 kthread_should_stop());
>   
> -		if (cleanup_job) {
> +		if (cleanup_job)
>   			sched->ops->free_job(cleanup_job);
> -			/* queue timeout for next job */
> -			drm_sched_start_timeout(sched);
> -		}
>   
>   		if (!entity)
>   			continue;



* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-01  6:50 ` [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Christian König
@ 2021-09-01 20:42   ` Alex Deucher
  2021-09-14 19:33     ` Alex Deucher
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2021-09-01 20:42 UTC (permalink / raw)
  To: Christian König; +Cc: Monk Liu, amd-gfx list, Maling list - DRI developers

On Wed, Sep 1, 2021 at 2:50 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Am 01.09.21 um 02:46 schrieb Monk Liu:
> > issue:
> > in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
> > even though its corresponding job is still running.
> >
> > fix:
> > do not cancel the timer in cleanup_job; instead do the cancelling
> > only when the head job has signaled, and if there is a "next" job,
> > start the timeout again.
> >
> > v2:
> > further clean up the logic, and cancel the TDR timer when the signaled
> > job is the last one in its scheduler.
> >
> > v3:
> > reword the issue description,
> > remove the cancel_delayed_work at the beginning of cleanup_job,
> > restore the implementation of drm_sched_job_begin.
> >
> > v4:
> > remove the kthread_should_park() check from the cleanup_job routine;
> > signaled jobs should be cleaned up as soon as possible.
> >
> > TODO:
> > 1) introduce scheduler pause/resume in job_timeout to serialize the
> > handling of the scheduler and job_timeout.
> > 2) drop the bad job's removal and re-insertion in the scheduler; with
> > the serialization above there is no race left.
> >
> > Tested-by: Jingwen Chen <jingwen.chen@amd.com>
> > Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>
> Reviewed-by: Christian König <christian.koenig@amd.com>
>

Are you planning to push this to drm-misc?

Alex


> > ---
> >   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
> >   1 file changed, 9 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index a2a9536..3e0bbc7 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> >   {
> >       struct drm_sched_job *job, *next;
> >
> > -     /*
> > -      * Don't destroy jobs while the timeout worker is running  OR thread
> > -      * is being parked and hence assumed to not touch pending_list
> > -      */
> > -     if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> > -         !cancel_delayed_work(&sched->work_tdr)) ||
> > -         kthread_should_park())
> > -             return NULL;
> > -
> >       spin_lock(&sched->job_list_lock);
> >
> >       job = list_first_entry_or_null(&sched->pending_list,
> > @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> >       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
> >               /* remove job from pending_list */
> >               list_del_init(&job->list);
> > +
> > +             /* cancel this job's TO timer */
> > +             cancel_delayed_work(&sched->work_tdr);
> >               /* make the scheduled timestamp more accurate */
> >               next = list_first_entry_or_null(&sched->pending_list,
> >                                               typeof(*next), list);
> > -             if (next)
> > +
> > +             if (next) {
> >                       next->s_fence->scheduled.timestamp =
> >                               job->s_fence->finished.timestamp;
> > -
> > +                     /* start TO timer for next job */
> > +                     drm_sched_start_timeout(sched);
> > +             }
> >       } else {
> >               job = NULL;
> > -             /* queue timeout for next job */
> > -             drm_sched_start_timeout(sched);
> >       }
> >
> >       spin_unlock(&sched->job_list_lock);
> > @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
> >                                         (entity = drm_sched_select_entity(sched))) ||
> >                                        kthread_should_stop());
> >
> > -             if (cleanup_job) {
> > +             if (cleanup_job)
> >                       sched->ops->free_job(cleanup_job);
> > -                     /* queue timeout for next job */
> > -                     drm_sched_start_timeout(sched);
> > -             }
> >
> >               if (!entity)
> >                       continue;
>


* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-01 20:42   ` Alex Deucher
@ 2021-09-14 19:33     ` Alex Deucher
  2021-09-14 22:59       ` Grodzovsky, Andrey
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2021-09-14 19:33 UTC (permalink / raw)
  To: Christian König; +Cc: Monk Liu, amd-gfx list, Maling list - DRI developers

Was this fix independent of the other discussions?  Should this be
applied to drm-misc?

Alex

On Wed, Sep 1, 2021 at 4:42 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>
> On Wed, Sep 1, 2021 at 2:50 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
> >
> > Am 01.09.21 um 02:46 schrieb Monk Liu:
> > > issue:
> > > in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
> > > even though its corresponding job is still running.
> > >
> > > fix:
> > > do not cancel the timer in cleanup_job; instead do the cancelling
> > > only when the head job has signaled, and if there is a "next" job,
> > > start the timeout again.
> > >
> > > v2:
> > > further clean up the logic, and cancel the TDR timer when the signaled
> > > job is the last one in its scheduler.
> > >
> > > v3:
> > > reword the issue description,
> > > remove the cancel_delayed_work at the beginning of cleanup_job,
> > > restore the implementation of drm_sched_job_begin.
> > >
> > > v4:
> > > remove the kthread_should_park() check from the cleanup_job routine;
> > > signaled jobs should be cleaned up as soon as possible.
> > >
> > > TODO:
> > > 1) introduce scheduler pause/resume in job_timeout to serialize the
> > > handling of the scheduler and job_timeout.
> > > 2) drop the bad job's removal and re-insertion in the scheduler; with
> > > the serialization above there is no race left.
> > >
> > > Tested-by: Jingwen Chen <jingwen.chen@amd.com>
> > > Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> >
> > Reviewed-by: Christian König <christian.koenig@amd.com>
> >
>
> Are you planning to push this to drm-misc?
>
> Alex
>
>
> > > ---
> > >   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
> > >   1 file changed, 9 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > index a2a9536..3e0bbc7 100644
> > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > >   {
> > >       struct drm_sched_job *job, *next;
> > >
> > > -     /*
> > > -      * Don't destroy jobs while the timeout worker is running  OR thread
> > > -      * is being parked and hence assumed to not touch pending_list
> > > -      */
> > > -     if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> > > -         !cancel_delayed_work(&sched->work_tdr)) ||
> > > -         kthread_should_park())
> > > -             return NULL;
> > > -
> > >       spin_lock(&sched->job_list_lock);
> > >
> > >       job = list_first_entry_or_null(&sched->pending_list,
> > > @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > >       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
> > >               /* remove job from pending_list */
> > >               list_del_init(&job->list);
> > > +
> > > +             /* cancel this job's TO timer */
> > > +             cancel_delayed_work(&sched->work_tdr);
> > >               /* make the scheduled timestamp more accurate */
> > >               next = list_first_entry_or_null(&sched->pending_list,
> > >                                               typeof(*next), list);
> > > -             if (next)
> > > +
> > > +             if (next) {
> > >                       next->s_fence->scheduled.timestamp =
> > >                               job->s_fence->finished.timestamp;
> > > -
> > > +                     /* start TO timer for next job */
> > > +                     drm_sched_start_timeout(sched);
> > > +             }
> > >       } else {
> > >               job = NULL;
> > > -             /* queue timeout for next job */
> > > -             drm_sched_start_timeout(sched);
> > >       }
> > >
> > >       spin_unlock(&sched->job_list_lock);
> > > @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
> > >                                         (entity = drm_sched_select_entity(sched))) ||
> > >                                        kthread_should_stop());
> > >
> > > -             if (cleanup_job) {
> > > +             if (cleanup_job)
> > >                       sched->ops->free_job(cleanup_job);
> > > -                     /* queue timeout for next job */
> > > -                     drm_sched_start_timeout(sched);
> > > -             }
> > >
> > >               if (!entity)
> > >                       continue;
> >


* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-14 19:33     ` Alex Deucher
@ 2021-09-14 22:59       ` Grodzovsky, Andrey
  2021-09-15 11:45         ` Christian König
  0 siblings, 1 reply; 8+ messages in thread
From: Grodzovsky, Andrey @ 2021-09-14 22:59 UTC (permalink / raw)
  To: Alex Deucher, Christian König
  Cc: Liu, Monk, amd-gfx list, Maling list - DRI developers


AFAIK this one is independent.

Christian, can you confirm ?

Andrey
________________________________
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> on behalf of Alex Deucher <alexdeucher@gmail.com>
Sent: 14 September 2021 15:33
To: Christian König <ckoenig.leichtzumerken@gmail.com>
Cc: Liu, Monk <Monk.Liu@amd.com>; amd-gfx list <amd-gfx@lists.freedesktop.org>; Maling list - DRI developers <dri-devel@lists.freedesktop.org>
Subject: Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)

Was this fix independent of the other discussions?  Should this be
applied to drm-misc?

Alex

On Wed, Sep 1, 2021 at 4:42 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>
> On Wed, Sep 1, 2021 at 2:50 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
> >
> > Am 01.09.21 um 02:46 schrieb Monk Liu:
> > > > issue:
> > > > in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
> > > > even though its corresponding job is still running.
> > > >
> > > > fix:
> > > > do not cancel the timer in cleanup_job; instead do the cancelling
> > > > only when the head job has signaled, and if there is a "next" job,
> > > > start the timeout again.
> > > >
> > > > v2:
> > > > further clean up the logic, and cancel the TDR timer when the signaled
> > > > job is the last one in its scheduler.
> > > >
> > > > v3:
> > > > reword the issue description,
> > > > remove the cancel_delayed_work at the beginning of cleanup_job,
> > > > restore the implementation of drm_sched_job_begin.
> > > >
> > > > v4:
> > > > remove the kthread_should_park() check from the cleanup_job routine;
> > > > signaled jobs should be cleaned up as soon as possible.
> > > >
> > > > TODO:
> > > > 1) introduce scheduler pause/resume in job_timeout to serialize the
> > > > handling of the scheduler and job_timeout.
> > > > 2) drop the bad job's removal and re-insertion in the scheduler; with
> > > > the serialization above there is no race left.
> > > >
> > > > Tested-by: Jingwen Chen <jingwen.chen@amd.com>
> > > Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> >
> > Reviewed-by: Christian König <christian.koenig@amd.com>
> >
>
> Are you planning to push this to drm-misc?
>
> Alex
>
>
> > > ---
> > >   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
> > >   1 file changed, 9 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > index a2a9536..3e0bbc7 100644
> > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > >   {
> > >       struct drm_sched_job *job, *next;
> > >
> > > -     /*
> > > -      * Don't destroy jobs while the timeout worker is running  OR thread
> > > -      * is being parked and hence assumed to not touch pending_list
> > > -      */
> > > -     if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> > > -         !cancel_delayed_work(&sched->work_tdr)) ||
> > > -         kthread_should_park())
> > > -             return NULL;
> > > -
> > >       spin_lock(&sched->job_list_lock);
> > >
> > >       job = list_first_entry_or_null(&sched->pending_list,
> > > @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > >       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
> > >               /* remove job from pending_list */
> > >               list_del_init(&job->list);
> > > +
> > > +             /* cancel this job's TO timer */
> > > +             cancel_delayed_work(&sched->work_tdr);
> > >               /* make the scheduled timestamp more accurate */
> > >               next = list_first_entry_or_null(&sched->pending_list,
> > >                                               typeof(*next), list);
> > > -             if (next)
> > > +
> > > +             if (next) {
> > >                       next->s_fence->scheduled.timestamp =
> > >                               job->s_fence->finished.timestamp;
> > > -
> > > +                     /* start TO timer for next job */
> > > +                     drm_sched_start_timeout(sched);
> > > +             }
> > >       } else {
> > >               job = NULL;
> > > -             /* queue timeout for next job */
> > > -             drm_sched_start_timeout(sched);
> > >       }
> > >
> > >       spin_unlock(&sched->job_list_lock);
> > > @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
> > >                                         (entity = drm_sched_select_entity(sched))) ||
> > >                                        kthread_should_stop());
> > >
> > > -             if (cleanup_job) {
> > > +             if (cleanup_job)
> > >                       sched->ops->free_job(cleanup_job);
> > > -                     /* queue timeout for next job */
> > > -                     drm_sched_start_timeout(sched);
> > > -             }
> > >
> > >               if (!entity)
> > >                       continue;
> >



* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-14 22:59       ` Grodzovsky, Andrey
@ 2021-09-15 11:45         ` Christian König
  2021-09-15 14:32           ` Andrey Grodzovsky
  0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2021-09-15 11:45 UTC (permalink / raw)
  To: Grodzovsky, Andrey, Alex Deucher
  Cc: Liu, Monk, amd-gfx list, Maling list - DRI developers


Yes, I think so as well. Andrey can you push this?

Christian.

Am 15.09.21 um 00:59 schrieb Grodzovsky, Andrey:
> AFAIK this one is independent.
>
> Christian, can you confirm ?
>
> Andrey
> ------------------------------------------------------------------------
> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org> on behalf of 
> Alex Deucher <alexdeucher@gmail.com>
> *Sent:* 14 September 2021 15:33
> *To:* Christian König <ckoenig.leichtzumerken@gmail.com>
> *Cc:* Liu, Monk <Monk.Liu@amd.com>; amd-gfx list 
> <amd-gfx@lists.freedesktop.org>; Maling list - DRI developers 
> <dri-devel@lists.freedesktop.org>
> *Subject:* Re: [PATCH 1/2] drm/sched: fix the bug of time out 
> calculation(v4)
> Was this fix independent of the other discussions?  Should this be
> applied to drm-misc?
>
> Alex
>
> On Wed, Sep 1, 2021 at 4:42 PM Alex Deucher <alexdeucher@gmail.com> wrote:
> >
> > On Wed, Sep 1, 2021 at 2:50 AM Christian König
> > <ckoenig.leichtzumerken@gmail.com> wrote:
> > >
> > > Am 01.09.21 um 02:46 schrieb Monk Liu:
> > > > issue:
> > > > in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
> > > > even though its corresponding job is still running.
> > > >
> > > > fix:
> > > > do not cancel the timer in cleanup_job; instead do the cancelling
> > > > only when the head job has signaled, and if there is a "next" job,
> > > > start the timeout again.
> > > >
> > > > v2:
> > > > further clean up the logic, and cancel the TDR timer when the signaled
> > > > job is the last one in its scheduler.
> > > >
> > > > v3:
> > > > reword the issue description,
> > > > remove the cancel_delayed_work at the beginning of cleanup_job,
> > > > restore the implementation of drm_sched_job_begin.
> > > >
> > > > v4:
> > > > remove the kthread_should_park() check from the cleanup_job routine;
> > > > signaled jobs should be cleaned up as soon as possible.
> > > >
> > > > TODO:
> > > > 1) introduce scheduler pause/resume in job_timeout to serialize the
> > > > handling of the scheduler and job_timeout.
> > > > 2) drop the bad job's removal and re-insertion in the scheduler; with
> > > > the serialization above there is no race left.
> > > >
> > > > Tested-by: Jingwen Chen <jingwen.chen@amd.com>
> > > > Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> > >
> > > Reviewed-by: Christian König <christian.koenig@amd.com>
> > >
> >
> > Are you planning to push this to drm-misc?
> >
> > Alex
> >
> >
> > > > ---
> > > >   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
> > > >   1 file changed, 9 insertions(+), 17 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > > index a2a9536..3e0bbc7 100644
> > > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > > @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > > >   {
> > > >       struct drm_sched_job *job, *next;
> > > >
> > > > -     /*
> > > > -      * Don't destroy jobs while the timeout worker is running  OR thread
> > > > -      * is being parked and hence assumed to not touch pending_list
> > > > -      */
> > > > -     if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> > > > -         !cancel_delayed_work(&sched->work_tdr)) ||
> > > > -         kthread_should_park())
> > > > -             return NULL;
> > > > -
> > > >       spin_lock(&sched->job_list_lock);
> > > >
> > > >       job = list_first_entry_or_null(&sched->pending_list,
> > > > @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> > > >       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
> > > >               /* remove job from pending_list */
> > > >               list_del_init(&job->list);
> > > > +
> > > > +             /* cancel this job's TO timer */
> > > > +             cancel_delayed_work(&sched->work_tdr);
> > > >               /* make the scheduled timestamp more accurate */
> > > >               next = list_first_entry_or_null(&sched->pending_list,
> > > >                                               typeof(*next), list);
> > > > -             if (next)
> > > > +
> > > > +             if (next) {
> > > >                       next->s_fence->scheduled.timestamp =
> > > >                               job->s_fence->finished.timestamp;
> > > > -
> > > > +                     /* start TO timer for next job */
> > > > +                     drm_sched_start_timeout(sched);
> > > > +             }
> > > >       } else {
> > > >               job = NULL;
> > > > -             /* queue timeout for next job */
> > > > -             drm_sched_start_timeout(sched);
> > > >       }
> > > >
> > > >       spin_unlock(&sched->job_list_lock);
> > > > @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
> > > >                                         (entity = drm_sched_select_entity(sched))) ||
> > > >                                        kthread_should_stop());
> > > >
> > > > -             if (cleanup_job) {
> > > > +             if (cleanup_job)
> > > >                       sched->ops->free_job(cleanup_job);
> > > > -                     /* queue timeout for next job */
> > > > -                     drm_sched_start_timeout(sched);
> > > > -             }
> > > >
> > > >               if (!entity)
> > > >                       continue;
> > >




* Re: [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4)
  2021-09-15 11:45         ` Christian König
@ 2021-09-15 14:32           ` Andrey Grodzovsky
  0 siblings, 0 replies; 8+ messages in thread
From: Andrey Grodzovsky @ 2021-09-15 14:32 UTC (permalink / raw)
  To: Christian König, Alex Deucher
  Cc: Liu, Monk, amd-gfx list, Maling list - DRI developers


Pushed

Andrey

On 2021-09-15 7:45 a.m., Christian König wrote:
> Yes, I think so as well. Andrey can you push this?
>
> Christian.
>
> Am 15.09.21 um 00:59 schrieb Grodzovsky, Andrey:
>> AFAIK this one is independent.
>>
>> Christian, can you confirm ?
>>
>> Andrey
>> ------------------------------------------------------------------------
>> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org> on behalf of 
>> Alex Deucher <alexdeucher@gmail.com>
>> *Sent:* 14 September 2021 15:33
>> *To:* Christian König <ckoenig.leichtzumerken@gmail.com>
>> *Cc:* Liu, Monk <Monk.Liu@amd.com>; amd-gfx list 
>> <amd-gfx@lists.freedesktop.org>; Maling list - DRI developers 
>> <dri-devel@lists.freedesktop.org>
>> *Subject:* Re: [PATCH 1/2] drm/sched: fix the bug of time out 
>> calculation(v4)
>> Was this fix independent of the other discussions?  Should this be
>> applied to drm-misc?
>>
>> Alex
>>
>> On Wed, Sep 1, 2021 at 4:42 PM Alex Deucher <alexdeucher@gmail.com> 
>> wrote:
>> >
>> > On Wed, Sep 1, 2021 at 2:50 AM Christian König
>> > <ckoenig.leichtzumerken@gmail.com> wrote:
>> > >
>> > > Am 01.09.21 um 02:46 schrieb Monk Liu:
>> > > > issue:
>> > > > in cleanup_job, cancel_delayed_work cancels the TO (timeout) timer
>> > > > even though its corresponding job is still running.
>> > > >
>> > > > fix:
>> > > > do not cancel the timer in cleanup_job; instead do the cancelling
>> > > > only when the head job has signaled, and if there is a "next" job,
>> > > > start the timeout again.
>> > > >
>> > > > v2:
>> > > > further clean up the logic, and cancel the TDR timer when the signaled
>> > > > job is the last one in its scheduler.
>> > > >
>> > > > v3:
>> > > > reword the issue description,
>> > > > remove the cancel_delayed_work at the beginning of cleanup_job,
>> > > > restore the implementation of drm_sched_job_begin.
>> > > >
>> > > > v4:
>> > > > remove the kthread_should_park() check from the cleanup_job routine;
>> > > > signaled jobs should be cleaned up as soon as possible.
>> > > >
>> > > > TODO:
>> > > > 1) introduce scheduler pause/resume in job_timeout to serialize the
>> > > > handling of the scheduler and job_timeout.
>> > > > 2) drop the bad job's removal and re-insertion in the scheduler; with
>> > > > the serialization above there is no race left.
>> > > >
>> > > > Tested-by: Jingwen Chen <jingwen.chen@amd.com>
>> > > > Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>> > >
>> > > Reviewed-by: Christian König <christian.koenig@amd.com>
>> > >
>> >
>> > Are you planning to push this to drm-misc?
>> >
>> > Alex
>> >
>> >
>> > > > ---
>> > > >   drivers/gpu/drm/scheduler/sched_main.c | 26 +++++++++-----------------
>> > > >   1 file changed, 9 insertions(+), 17 deletions(-)
>> > > >
>> > > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>> > > > index a2a9536..3e0bbc7 100644
>> > > > --- a/drivers/gpu/drm/scheduler/sched_main.c
>> > > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
>> > > > @@ -676,15 +676,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>> > > >   {
>> > > >       struct drm_sched_job *job, *next;
>> > > >
>> > > > -     /*
>> > > > -      * Don't destroy jobs while the timeout worker is running  OR thread
>> > > > -      * is being parked and hence assumed to not touch pending_list
>> > > > -      */
>> > > > -     if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
>> > > > -         !cancel_delayed_work(&sched->work_tdr)) ||
>> > > > -         kthread_should_park())
>> > > > -             return NULL;
>> > > > -
>> > > >       spin_lock(&sched->job_list_lock);
>> > > >
>> > > >       job = list_first_entry_or_null(&sched->pending_list,
>> > > > @@ -693,17 +684,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>> > > >       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
>> > > >               /* remove job from pending_list */
>> > > >               list_del_init(&job->list);
>> > > > +
>> > > > +             /* cancel this job's TO timer */
>> > > > +             cancel_delayed_work(&sched->work_tdr);
>> > > >               /* make the scheduled timestamp more accurate */
>> > > >               next = list_first_entry_or_null(&sched->pending_list,
>> > > >                                               typeof(*next), list);
>> > > > -             if (next)
>> > > > +
>> > > > +             if (next) {
>> > > >                       next->s_fence->scheduled.timestamp =
>> > > >                               job->s_fence->finished.timestamp;
>> > > > -
>> > > > +                     /* start TO timer for next job */
>> > > > +                     drm_sched_start_timeout(sched);
>> > > > +             }
>> > > >       } else {
>> > > >               job = NULL;
>> > > > -             /* queue timeout for next job */
>> > > > -             drm_sched_start_timeout(sched);
>> > > >       }
>> > > >
>> > > >       spin_unlock(&sched->job_list_lock);
>> > > > @@ -791,11 +786,8 @@ static int drm_sched_main(void *param)
>> > > >                                         (entity = drm_sched_select_entity(sched))) ||
>> > > >                                        kthread_should_stop());
>> > > >
>> > > > -             if (cleanup_job) {
>> > > > +             if (cleanup_job)
>> > > >                       sched->ops->free_job(cleanup_job);
>> > > > -                     /* queue timeout for next job */
>> > > > -                     drm_sched_start_timeout(sched);
>> > > > -             }
>> > > >
>> > > >               if (!entity)
>> > > >                       continue;
>> > >
>



end of thread (newest message: 2021-09-15 14:32 UTC)

Thread overview: 8+ messages
2021-09-01  0:46 [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Monk Liu
2021-09-01  0:46 ` [PATCH 2/2] drm/sched: serialize job_timeout and scheduler Monk Liu
2021-09-01  6:50 ` [PATCH 1/2] drm/sched: fix the bug of time out calculation(v4) Christian König
2021-09-01 20:42   ` Alex Deucher
2021-09-14 19:33     ` Alex Deucher
2021-09-14 22:59       ` Grodzovsky, Andrey
2021-09-15 11:45         ` Christian König
2021-09-15 14:32           ` Andrey Grodzovsky
