amd-gfx.lists.freedesktop.org archive mirror
From: "Christian König" <christian.koenig@amd.com>
To: Luben Tuikov <luben.tuikov@amd.com>,
	Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>,
	Lucas Stach <l.stach@pengutronix.de>,
	Alexander Deucher <Alexander.Deucher@amd.com>
Cc: Emily Deng <Emily.Deng@amd.com>,
	amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	steven.price@arm.com
Subject: Re: [PATCH 4/6] drm/scheduler: Essentialize the job done callback
Date: Wed, 25 Nov 2020 10:51:58 +0100	[thread overview]
Message-ID: <ac9667db-0e78-9454-34ea-3e41ca6dbbd7@amd.com> (raw)
In-Reply-To: <20201125031708.6433-5-luben.tuikov@amd.com>

Am 25.11.20 um 04:17 schrieb Luben Tuikov:
> The job done callback is called from various
> places in two roles: as the job-done handler
> proper, and as a dma_fence callback.
>
> Essentialize the callback into an atomic
> function which just completes the job, and a
> second function with the fence-callback
> prototype which calls the first to complete
> the job.
>
> This is used by the completion code in later
> patches.
>
> Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>
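
For readers following the diff below: the important detail in this pattern is that dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the callback will never run and the caller must complete the job itself. A condensed, illustrative sketch of the registration path as it looks after this patch (the wrapper name drm_sched_arm_done_cb is hypothetical and only for illustration; error handling is reduced to the DRM_ERROR the scheduler already uses):

    #include <linux/dma-fence.h>
    #include <drm/drm_print.h>
    #include <drm/gpu_scheduler.h>

    /* Illustrative only -- not part of this patch. */
    static void drm_sched_arm_done_cb(struct drm_sched_job *s_job,
                                      struct dma_fence *fence)
    {
            int r;

            /* Have the fence invoke drm_sched_job_done_cb() when it signals. */
            r = dma_fence_add_callback(fence, &s_job->cb,
                                       drm_sched_job_done_cb);
            if (r == -ENOENT)
                    /* Fence already signaled: complete the job directly. */
                    drm_sched_job_done(s_job);
            else if (r)
                    DRM_ERROR("fence add callback failed (%d)\n", r);
    }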

> ---
>   drivers/gpu/drm/scheduler/sched_main.c | 73 ++++++++++++++------------
>   1 file changed, 40 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index b694df12aaba..3eb7618a627d 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -60,8 +60,6 @@
>   #define to_drm_sched_job(sched_job)		\
>   		container_of((sched_job), struct drm_sched_job, queue_node)
>   
> -static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
> -
>   /**
>    * drm_sched_rq_init - initialize a given run queue struct
>    *
> @@ -162,6 +160,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>   	return NULL;
>   }
>   
> +/**
> + * drm_sched_job_done - complete a job
> + * @s_job: pointer to the job which is done
> + *
> + * Finish the job's fence and wake up the worker thread.
> + */
> +static void drm_sched_job_done(struct drm_sched_job *s_job)
> +{
> +	struct drm_sched_fence *s_fence = s_job->s_fence;
> +	struct drm_gpu_scheduler *sched = s_fence->sched;
> +
> +	atomic_dec(&sched->hw_rq_count);
> +	atomic_dec(&sched->score);
> +
> +	trace_drm_sched_process_job(s_fence);
> +
> +	dma_fence_get(&s_fence->finished);
> +	drm_sched_fence_finished(s_fence);
> +	dma_fence_put(&s_fence->finished);
> +	wake_up_interruptible(&sched->wake_up_worker);
> +}
> +
> +/**
> + * drm_sched_job_done_cb - the callback for a done job
> + * @f: fence
> + * @cb: fence callbacks
> + */
> +static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
> +{
> +	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
> +
> +	drm_sched_job_done(s_job);
> +}
> +
>   /**
>    * drm_sched_dependency_optimized
>    *
> @@ -473,14 +505,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
>   
>   		if (fence) {
>   			r = dma_fence_add_callback(fence, &s_job->cb,
> -						   drm_sched_process_job);
> +						   drm_sched_job_done_cb);
>   			if (r == -ENOENT)
> -				drm_sched_process_job(fence, &s_job->cb);
> +				drm_sched_job_done(s_job);
>   			else if (r)
>   				DRM_ERROR("fence add callback failed (%d)\n",
>   					  r);
>   		} else
> -			drm_sched_process_job(NULL, &s_job->cb);
> +			drm_sched_job_done(s_job);
>   	}
>   
>   	if (full_recovery) {
> @@ -635,31 +667,6 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>   	return entity;
>   }
>   
> -/**
> - * drm_sched_process_job - process a job
> - *
> - * @f: fence
> - * @cb: fence callbacks
> - *
> - * Called after job has finished execution.
> - */
> -static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
> -{
> -	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
> -	struct drm_sched_fence *s_fence = s_job->s_fence;
> -	struct drm_gpu_scheduler *sched = s_fence->sched;
> -
> -	atomic_dec(&sched->hw_rq_count);
> -	atomic_dec(&sched->score);
> -
> -	trace_drm_sched_process_job(s_fence);
> -
> -	dma_fence_get(&s_fence->finished);
> -	drm_sched_fence_finished(s_fence);
> -	dma_fence_put(&s_fence->finished);
> -	wake_up_interruptible(&sched->wake_up_worker);
> -}
> -
>   /**
>    * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
>    *
> @@ -809,9 +816,9 @@ static int drm_sched_main(void *param)
>   		if (!IS_ERR_OR_NULL(fence)) {
>   			s_fence->parent = dma_fence_get(fence);
>   			r = dma_fence_add_callback(fence, &sched_job->cb,
> -						   drm_sched_process_job);
> +						   drm_sched_job_done_cb);
>   			if (r == -ENOENT)
> -				drm_sched_process_job(fence, &sched_job->cb);
> +				drm_sched_job_done(sched_job);
>   			else if (r)
>   				DRM_ERROR("fence add callback failed (%d)\n",
>   					  r);
> @@ -820,7 +827,7 @@ static int drm_sched_main(void *param)
>   			if (IS_ERR(fence))
>   				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
>   
> -			drm_sched_process_job(NULL, &sched_job->cb);
> +			drm_sched_job_done(sched_job);
>   		}
>   
>   		wake_up(&sched->job_scheduled);


Thread overview: 65+ messages
2019-11-25 20:51 [PATCH v4] drm/scheduler: Avoid accessing freed bad job Andrey Grodzovsky
2019-11-25 20:51 ` Andrey Grodzovsky
2019-11-25 21:44 ` Deng, Emily
2019-11-25 21:44   ` Deng, Emily
2019-11-26  0:09   ` Grodzovsky, Andrey
2019-11-26  0:09     ` Grodzovsky, Andrey
     [not found]     ` <MWHPR12MB1453C6FC45A83482232CA3EDEA450-Gy0DoCVfaSWZBIDmKHdw+wdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-11-26 15:36       ` Deucher, Alexander
2019-11-26 15:36         ` Deucher, Alexander
2019-11-26 15:37 ` Andrey Grodzovsky
2019-11-26 15:37   ` Andrey Grodzovsky
     [not found]   ` <b8b716a7-e235-38b2-ea6d-0a21881fa64e-5C7GfCeVMHo@public.gmane.org>
2019-11-27  0:41     ` Deng, Emily
2019-11-27  0:41       ` Deng, Emily
2019-12-02 19:24       ` Deng, Emily
2019-12-03 19:10         ` Andrey Grodzovsky
2019-12-03 19:44           ` Deucher, Alexander
2019-12-03 19:57             ` Andrey Grodzovsky
2019-12-03 19:59               ` Deucher, Alexander
2019-12-03 20:32                 ` Andrey Grodzovsky
2019-12-03 20:58                   ` Deng, Emily
2019-12-03 19:53           ` Deng, Emily
2020-02-05 18:24 ` Lucas Stach
2020-02-06 11:10   ` Lucas Stach
2020-02-06 11:49     ` Christian König
2020-02-06 14:49       ` Alex Deucher
2020-02-06 14:51         ` Christian König
2020-02-06 15:49           ` Andrey Grodzovsky
2020-02-10 16:55             ` Andrey Grodzovsky
2020-02-10 21:50               ` Luben Tuikov
2020-02-11 15:55                 ` Andrey Grodzovsky
2020-02-11 21:27                   ` Andrey Grodzovsky
2020-02-12  0:53                     ` Luben Tuikov
2020-02-12 16:33                       ` Andrey Grodzovsky
2020-07-21 11:03                         ` Lucas Stach
2020-07-21 13:36                           ` Andrey Grodzovsky
2020-07-21 13:39                             ` Christian König
2020-07-21 13:42                               ` Andrey Grodzovsky
2020-07-21 18:29                                 ` Luben Tuikov
2020-11-25  3:17                                   ` [PATCH 0/6] Allow to extend the timeout without jobs disappearing Luben Tuikov
2020-11-25  3:17                                     ` [PATCH 1/6] drm/scheduler: "node" --> "list" Luben Tuikov
2020-11-25  9:44                                       ` Christian König
2020-11-25  3:17                                     ` [PATCH 2/6] gpu/drm: ring_mirror_list --> pending_list Luben Tuikov
2020-11-25  9:47                                       ` Christian König
2020-11-25 16:42                                         ` Luben Tuikov
2020-11-25  3:17                                     ` [PATCH 3/6] drm/scheduler: Job timeout handler returns status Luben Tuikov
2020-11-25  4:41                                       ` kernel test robot
2020-11-25  9:50                                       ` Christian König
2020-11-25 16:48                                         ` Luben Tuikov
2020-11-25 11:04                                       ` Steven Price
2020-11-25 11:15                                         ` Lucas Stach
2020-11-25 11:22                                           ` Steven Price
2020-11-25 11:47                                             ` Lucas Stach
2020-11-25 12:41                                         ` Christian König
2020-11-26 15:06                                       ` Andrey Grodzovsky
2020-11-25  3:17                                     ` [PATCH 4/6] drm/scheduler: Essentialize the job done callback Luben Tuikov
2020-11-25  9:51                                       ` Christian König [this message]
2020-11-25  3:17                                     ` [PATCH 5/6] drm/amdgpu: Don't hardcode thread name length Luben Tuikov
2020-11-25  9:55                                       ` Christian König
2020-11-25 17:01                                         ` Luben Tuikov
2020-11-26  8:11                                           ` Christian König
2020-11-25  3:17                                     ` [PATCH 6/6] drm/sched: Make use of a "done" thread Luben Tuikov
2020-11-25 10:10                                       ` Christian König
2020-11-26  0:24                                         ` Luben Tuikov
2020-11-25 11:09                                       ` Steven Price
2020-11-26  0:30                                         ` Luben Tuikov
2020-02-07 15:26           ` [PATCH v4] drm/scheduler: Avoid accessing freed bad job Daniel Vetter
