From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: DRI Development <dri-devel@lists.freedesktop.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>,
	Daniel Vetter <daniel.vetter@intel.com>
Subject: [PATCH v2 07/11] drm/v3d: Use scheduler dependency handling
Date: Fri,  2 Jul 2021 23:38:11 +0200
Message-ID: <20210702213815.2249499-8-daniel.vetter@ffwll.ch>
In-Reply-To: <20210702213815.2249499-1-daniel.vetter@ffwll.ch>

With the prep work out of the way, this isn't tricky anymore.

Aside: The chaining of the various jobs is still a bit awkward, with
the possibility of failure in bad places (e.g. after the first job has
already been pushed). I think this should be fixable with the
drm_sched_job_init/arm split, and maybe by preloading the
job->dependencies xarray.
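
For reference, the dependency flow after this patch is roughly the
following (a sketch only, with error paths and locking elided; see the
hunks below for the real code):

	ret = drm_sched_job_init(&job->base,
				 &v3d_priv->sched_entity[queue], v3d_priv);

	/* implicit fences from the job's BOs now go into the
	 * scheduler's dependency storage, not a driver xarray */
	for (i = 0; i < job->bo_count; i++)
		ret = drm_sched_job_await_implicit(&job->base,
						   job->bo[i], true);

	/* same for the explicit in-fence from userspace */
	ret = drm_sched_job_await_fence(&job->base, in_fence);

	v3d_push_job(job);

	/* follow-up jobs chain on the previous job's done fence */
	ret = drm_sched_job_await_fence(&clean_job->base,
					dma_fence_get(job->base.done_fence));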

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/v3d/v3d_drv.h   |  5 -----
 drivers/gpu/drm/v3d/v3d_gem.c   | 25 ++++++++-----------------
 drivers/gpu/drm/v3d/v3d_sched.c | 29 +----------------------------
 3 files changed, 9 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 1d870261eaac..f80f4ff1f7aa 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -192,11 +192,6 @@ struct v3d_job {
 	struct drm_gem_object **bo;
 	u32 bo_count;
 
-	/* Array of struct dma_fence * to block on before submitting this job.
-	 */
-	struct xarray deps;
-	unsigned long last_dep;
-
 	/* v3d fence to be signaled by IRQ handler when the job is complete. */
 	struct dma_fence *irq_fence;
 
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5eccd3658938..42b07ffbea5e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -257,8 +257,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 		return ret;
 
 	for (i = 0; i < job->bo_count; i++) {
-		ret = drm_gem_fence_array_add_implicit(&job->deps,
-						       job->bo[i], true);
+		ret = drm_sched_job_await_implicit(&job->base,
+						   job->bo[i], true);
 		if (ret) {
 			drm_gem_unlock_reservations(job->bo, job->bo_count,
 						    acquire_ctx);
@@ -354,8 +354,6 @@ static void
 v3d_job_free(struct kref *ref)
 {
 	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
-	unsigned long index;
-	struct dma_fence *fence;
 	int i;
 
 	for (i = 0; i < job->bo_count; i++) {
@@ -364,11 +362,6 @@ v3d_job_free(struct kref *ref)
 	}
 	kvfree(job->bo);
 
-	xa_for_each(&job->deps, index, fence) {
-		dma_fence_put(fence);
-	}
-	xa_destroy(&job->deps);
-
 	dma_fence_put(job->irq_fence);
 	dma_fence_put(job->done_fence);
 
@@ -452,7 +445,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 	if (ret < 0)
 		return ret;
 
-	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
 				 v3d_priv);
 	if (ret)
@@ -462,7 +454,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 	if (ret == -EINVAL)
 		goto fail_job;
 
-	ret = drm_gem_fence_array_add(&job->deps, in_fence);
+	ret = drm_sched_job_await_fence(&job->base, in_fence);
 	if (ret)
 		goto fail_job;
 
@@ -472,7 +464,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 fail_job:
 	drm_sched_job_cleanup(&job->base);
 fail:
-	xa_destroy(&job->deps);
 	pm_runtime_put_autosuspend(v3d->drm.dev);
 	return ret;
 }
@@ -619,8 +610,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (bin) {
 		v3d_push_job(&bin->base);
 
-		ret = drm_gem_fence_array_add(&render->base.deps,
-					      dma_fence_get(bin->base.done_fence));
+		ret = drm_sched_job_await_fence(&render->base.base,
+						dma_fence_get(bin->base.done_fence));
 		if (ret)
 			goto fail_unreserve;
 	}
@@ -630,7 +621,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (clean_job) {
 		struct dma_fence *render_fence =
 			dma_fence_get(render->base.done_fence);
-		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+		ret = drm_sched_job_await_fence(&clean_job->base, render_fence);
 		if (ret)
 			goto fail_unreserve;
 		v3d_push_job(clean_job);
@@ -820,8 +811,8 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&v3d->sched_lock);
 	v3d_push_job(&job->base);
 
-	ret = drm_gem_fence_array_add(&clean_job->deps,
-				      dma_fence_get(job->base.done_fence));
+	ret = drm_sched_job_await_fence(&clean_job->base,
+					dma_fence_get(job->base.done_fence));
 	if (ret)
 		goto fail_unreserve;
 
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 3f352d73af9c..f0de584f452c 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -13,7 +13,7 @@
  * jobs when bulk background jobs are queued up, we submit a new job
  * to the HW only when it has completed the last one, instead of
  * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_await_fence() to manage the dependency between bin and
  * render, instead of having the clients submit jobs using the HW's
  * semaphores to interlock between them.
  */
@@ -62,28 +62,6 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
 	v3d_job_cleanup(job);
 }
 
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
-		   struct drm_sched_entity *s_entity)
-{
-	struct v3d_job *job = to_v3d_job(sched_job);
-
-	/* XXX: Wait on a fence for switching the GMP if necessary,
-	 * and then do so.
-	 */
-
-	if (!xa_empty(&job->deps))
-		return xa_erase(&job->deps, job->last_dep++);
-
-	return NULL;
-}
-
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 {
 	struct v3d_bin_job *job = to_bin_job(sched_job);
@@ -356,35 +334,30 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
 }
 
 static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_bin_job_run,
 	.timedout_job = v3d_bin_job_timedout,
 	.free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_render_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_render_job_run,
 	.timedout_job = v3d_render_job_timedout,
 	.free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_tfu_job_run,
 	.timedout_job = v3d_generic_job_timedout,
 	.free_job = v3d_sched_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_csd_job_run,
 	.timedout_job = v3d_csd_job_timedout,
 	.free_job = v3d_sched_job_free
 };
 
 static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_cache_clean_job_run,
 	.timedout_job = v3d_generic_job_timedout,
 	.free_job = v3d_sched_job_free
-- 
2.32.0.rc2

