intel-gfx.lists.freedesktop.org archive mirror
From: Melissa Wen <mwen@igalia.com>
To: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Melissa Wen <melissa.srw@gmail.com>,
	Daniel Vetter <daniel.vetter@intel.com>,
	Intel Graphics Development <intel-gfx@lists.freedesktop.org>,
	"Cc : Emma Anholt" <emma@anholt.net>,
	DRI Development <dri-devel@lists.freedesktop.org>
Subject: Re: [Intel-gfx] [PATCH v4 09/18] drm/v3d: Use scheduler dependency handling
Date: Wed, 14 Jul 2021 10:37:11 +0100	[thread overview]
Message-ID: <20210714093711.4cuqwfsq46vqnghi@mail.igalia.com> (raw)
In-Reply-To: <20210712175352.802687-10-daniel.vetter@ffwll.ch>


On 07/12, Daniel Vetter wrote:
> With the prep work out of the way, this isn't tricky anymore.
> 
> Aside: The chaining of the various jobs is a bit awkward, with the
> possibility of failure in bad places. I think this should be fixable
> with the drm_sched_job_init/arm split, plus maybe preloading the
> job->dependencies xarray.
> 
> Cc: Melissa Wen <melissa.srw@gmail.com>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Emma Anholt <emma@anholt.net>
> ---
>  drivers/gpu/drm/v3d/v3d_drv.h   |  5 -----
>  drivers/gpu/drm/v3d/v3d_gem.c   | 25 ++++++++-----------------
>  drivers/gpu/drm/v3d/v3d_sched.c | 29 +----------------------------
>  3 files changed, 9 insertions(+), 50 deletions(-)
> 
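
A minimal sketch of the submit shape the aside above is pointing at,
using the drm_sched_job_init/arm split from patches 01 and 04 of this
series (hypothetical driver code under that API, not something this
patch implements): every step that can fail runs before the job is
armed, so the error path is just drm_sched_job_cleanup().

	ret = drm_sched_job_init(&job->base, &entity, owner);
	if (ret)
		return ret;

	/* all awaits may still fail, but nothing is committed yet */
	ret = drm_sched_job_await_fence(&job->base, in_fence);
	if (ret)
		goto cleanup;
	ret = drm_sched_job_await_implicit(&job->base, bo, true);
	if (ret)
		goto cleanup;

	drm_sched_job_arm(&job->base);		/* point of no return */
	drm_sched_entity_push_job(&job->base);	/* entity param dropped by patch 04 */
	return 0;

cleanup:
	drm_sched_job_cleanup(&job->base);
	return ret;
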
> diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
> index 1d870261eaac..f80f4ff1f7aa 100644
> --- a/drivers/gpu/drm/v3d/v3d_drv.h
> +++ b/drivers/gpu/drm/v3d/v3d_drv.h
> @@ -192,11 +192,6 @@ struct v3d_job {
>  	struct drm_gem_object **bo;
>  	u32 bo_count;
>  
> -	/* Array of struct dma_fence * to block on before submitting this job.
> -	 */
> -	struct xarray deps;
> -	unsigned long last_dep;
> -
>  	/* v3d fence to be signaled by IRQ handler when the job is complete. */
>  	struct dma_fence *irq_fence;
>  
> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> index 5eccd3658938..42b07ffbea5e 100644
> --- a/drivers/gpu/drm/v3d/v3d_gem.c
> +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> @@ -257,8 +257,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
>  		return ret;
>  
>  	for (i = 0; i < job->bo_count; i++) {
> -		ret = drm_gem_fence_array_add_implicit(&job->deps,
> -						       job->bo[i], true);
> +		ret = drm_sched_job_await_implicit(&job->base,
> +						   job->bo[i], true);
>  		if (ret) {
>  			drm_gem_unlock_reservations(job->bo, job->bo_count,
>  						    acquire_ctx);
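
The hunk above lands on this overall pattern (a condensed sketch; the
helper name is the one this series uses, later kernels call it
drm_sched_job_add_implicit_dependencies()). Passing true for the write
parameter makes the job wait on all of the BO's resv fences, readers
included:

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		/* feed each BO's implicit-sync fences into the
		 * scheduler's dependency tracking */
		ret = drm_sched_job_await_implicit(&job->base,
						   job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}
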
> @@ -354,8 +354,6 @@ static void
>  v3d_job_free(struct kref *ref)
>  {
>  	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
> -	unsigned long index;
> -	struct dma_fence *fence;
>  	int i;
>  
>  	for (i = 0; i < job->bo_count; i++) {
> @@ -364,11 +362,6 @@ v3d_job_free(struct kref *ref)
>  	}
>  	kvfree(job->bo);
>  
> -	xa_for_each(&job->deps, index, fence) {
> -		dma_fence_put(fence);
> -	}
> -	xa_destroy(&job->deps);
> -
>  	dma_fence_put(job->irq_fence);
>  	dma_fence_put(job->done_fence);
>  
> @@ -452,7 +445,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
>  	if (ret < 0)
>  		return ret;
>  
> -	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
>  	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
>  				 v3d_priv);
>  	if (ret)
> @@ -462,7 +454,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
>  	if (ret == -EINVAL)
>  		goto fail_job;
>  
> -	ret = drm_gem_fence_array_add(&job->deps, in_fence);
> +	ret = drm_sched_job_await_fence(&job->base, in_fence);
>  	if (ret)
>  		goto fail_job;
>  
> @@ -472,7 +464,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
>  fail_job:
>  	drm_sched_job_cleanup(&job->base);
>  fail:
> -	xa_destroy(&job->deps);
>  	pm_runtime_put_autosuspend(v3d->drm.dev);
>  	return ret;
>  }
> @@ -619,8 +610,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	if (bin) {
>  		v3d_push_job(&bin->base);
>  
> -		ret = drm_gem_fence_array_add(&render->base.deps,
> -					      dma_fence_get(bin->base.done_fence));
> +		ret = drm_sched_job_await_fence(&render->base.base,
> +						dma_fence_get(bin->base.done_fence));
>  		if (ret)
>  			goto fail_unreserve;
>  	}
> @@ -630,7 +621,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	if (clean_job) {
>  		struct dma_fence *render_fence =
>  			dma_fence_get(render->base.done_fence);
> -		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
> +		ret = drm_sched_job_await_fence(&clean_job->base, render_fence);
>  		if (ret)
>  			goto fail_unreserve;
>  		v3d_push_job(clean_job);
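
This is the spot the commit message's aside is about: the awaits in
the two hunks above can still fail after the bin job has already been
pushed, and a pushed job cannot be unwound. Condensed, the resulting
bin -> render -> clean chain looks like this (the
v3d_push_job(&render->base) call sits in the context elided between
the hunks):

	v3d_push_job(&bin->base);
	ret = drm_sched_job_await_fence(&render->base.base,
					dma_fence_get(bin->base.done_fence));
	if (ret)
		goto fail_unreserve;	/* bin is already running */

	v3d_push_job(&render->base);
	ret = drm_sched_job_await_fence(&clean_job->base,
					dma_fence_get(render->base.done_fence));
	if (ret)
		goto fail_unreserve;	/* likewise for render */
	v3d_push_job(clean_job);
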
> @@ -820,8 +811,8 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
>  	mutex_lock(&v3d->sched_lock);
>  	v3d_push_job(&job->base);
>  
> -	ret = drm_gem_fence_array_add(&clean_job->deps,
> -				      dma_fence_get(job->base.done_fence));
> +	ret = drm_sched_job_await_fence(&clean_job->base,
> +					dma_fence_get(job->base.done_fence));
>  	if (ret)
>  		goto fail_unreserve;
>  
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 3f352d73af9c..f0de584f452c 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -13,7 +13,7 @@
>   * jobs when bulk background jobs are queued up, we submit a new job
>   * to the HW only when it has completed the last one, instead of
>   * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
> - * v3d_job_dependency() to manage the dependency between bin and
> + * drm_sched_job_await_fence() to manage the dependency between bin and
>   * render, instead of having the clients submit jobs using the HW's
>   * semaphores to interlock between them.
>   */
> @@ -62,28 +62,6 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
>  	v3d_job_cleanup(job);
>  }
>  
> -/*
> - * Returns the fences that the job depends on, one by one.
> - *
> - * If placed in the scheduler's .dependency method, the corresponding
> - * .run_job won't be called until all of them have been signaled.
> - */
> -static struct dma_fence *
> -v3d_job_dependency(struct drm_sched_job *sched_job,
> -		   struct drm_sched_entity *s_entity)
> -{
> -	struct v3d_job *job = to_v3d_job(sched_job);
> -
> -	/* XXX: Wait on a fence for switching the GMP if necessary,
> -	 * and then do so.
> -	 */
> -
> -	if (!xa_empty(&job->deps))
> -		return xa_erase(&job->deps, job->last_dep++);
> -
> -	return NULL;
> -}
> -
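
With the .dependency callback gone from the ops tables below, the
equivalent logic moves into the scheduler core: patch 03 of this
series has it drain the job->dependencies xarray that the await
helpers fill. Roughly (paraphrased, not quoted from that patch):

	/* Core-side replacement for the deleted v3d_job_dependency():
	 * driver-agnostic, keyed off the xarray populated by
	 * drm_sched_job_await_fence()/_await_implicit().
	 */
	static struct dma_fence *
	drm_sched_job_dependency(struct drm_sched_job *job,
				 struct drm_sched_entity *entity)
	{
		if (!xa_empty(&job->dependencies))
			return xa_erase(&job->dependencies,
					job->last_dependency++);

		if (job->sched->ops->dependency)
			return job->sched->ops->dependency(job, entity);

		return NULL;
	}
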
>  static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
>  {
>  	struct v3d_bin_job *job = to_bin_job(sched_job);
> @@ -356,35 +334,30 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
>  }
>  
>  static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
> -	.dependency = v3d_job_dependency,
>  	.run_job = v3d_bin_job_run,
>  	.timedout_job = v3d_bin_job_timedout,
>  	.free_job = v3d_sched_job_free,
>  };
>  
>  static const struct drm_sched_backend_ops v3d_render_sched_ops = {
> -	.dependency = v3d_job_dependency,
>  	.run_job = v3d_render_job_run,
>  	.timedout_job = v3d_render_job_timedout,
>  	.free_job = v3d_sched_job_free,
>  };
>  
>  static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
> -	.dependency = v3d_job_dependency,
>  	.run_job = v3d_tfu_job_run,
>  	.timedout_job = v3d_generic_job_timedout,
>  	.free_job = v3d_sched_job_free,
>  };
>  
>  static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
> -	.dependency = v3d_job_dependency,
>  	.run_job = v3d_csd_job_run,
>  	.timedout_job = v3d_csd_job_timedout,
>  	.free_job = v3d_sched_job_free
>  };
>  
>  static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
> -	.dependency = v3d_job_dependency,
>  	.run_job = v3d_cache_clean_job_run,
>  	.timedout_job = v3d_generic_job_timedout,
>  	.free_job = v3d_sched_job_free
Also here.

Reviewed-by: Melissa Wen <mwen@igalia.com>
> -- 
> 2.32.0
> 

