From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: DRI Development <dri-devel@lists.freedesktop.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>,
	Daniel Vetter <daniel.vetter@intel.com>
Subject: [PATCH 07/11] drm/v3d: Use scheduler dependency handling
Date: Thu, 24 Jun 2021 16:00:21 +0200
Message-ID: <20210624140025.438303-8-daniel.vetter@ffwll.ch>
In-Reply-To: <20210624140025.438303-1-daniel.vetter@ffwll.ch>

With the prep work out of the way, this isn't tricky anymore.

Aside: The chaining of the various jobs is a bit awkward, with
failures possible in bad places. I think that with the
drm_sched_job_init/arm split, and maybe preloading the
job->dependencies xarray, this should be fixable.
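
Roughly, the idea would look like the following. This is only a
sketch, not part of this patch: entity, v3d_priv and in_fence stand
in for the real arguments, the init/arm split is from patch 01 of
this series, and the entity-less push is from patch 03.

	ret = drm_sched_job_init(&job->base, entity, v3d_priv);
	if (ret)
		return ret;

	/* All fallible dependency bookkeeping happens between init
	 * and arm, so a failure never leaves a half-published job.
	 */
	ret = drm_sched_job_await_fence(&job->base, in_fence);
	if (ret)
		goto fail;	/* job not yet visible to the scheduler */

	drm_sched_job_arm(&job->base);	/* point of no return */
	drm_sched_entity_push_job(&job->base);
	return 0;

fail:
	drm_sched_job_cleanup(&job->base);
	return ret;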

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/v3d/v3d_drv.h   |  5 -----
 drivers/gpu/drm/v3d/v3d_gem.c   | 25 ++++++++-----------------
 drivers/gpu/drm/v3d/v3d_sched.c | 29 +----------------------------
 3 files changed, 9 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 8a390738d65b..73559eb368a7 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -192,11 +192,6 @@ struct v3d_job {
 	struct drm_gem_object **bo;
 	u32 bo_count;
 
-	/* Array of struct dma_fence * to block on before submitting this job.
-	 */
-	struct xarray deps;
-	unsigned long last_dep;
-
 	/* v3d fence to be signaled by IRQ handler when the job is complete. */
 	struct dma_fence *irq_fence;
 
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 40302c77e667..c54684f8b877 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -257,8 +257,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 		return ret;
 
 	for (i = 0; i < job->bo_count; i++) {
-		ret = drm_gem_fence_array_add_implicit(&job->deps,
-						       job->bo[i], true);
+		ret = drm_sched_job_await_implicit(&job->base,
+						   job->bo[i], true);
 		if (ret) {
 			drm_gem_unlock_reservations(job->bo, job->bo_count,
 						    acquire_ctx);
@@ -354,8 +354,6 @@ static void
 v3d_job_free(struct kref *ref)
 {
 	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
-	unsigned long index;
-	struct dma_fence *fence;
 	int i;
 
 	for (i = 0; i < job->bo_count; i++) {
@@ -364,11 +362,6 @@ v3d_job_free(struct kref *ref)
 	}
 	kvfree(job->bo);
 
-	xa_for_each(&job->deps, index, fence) {
-		dma_fence_put(fence);
-	}
-	xa_destroy(&job->deps);
-
 	dma_fence_put(job->irq_fence);
 	dma_fence_put(job->done_fence);
 
@@ -446,7 +439,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 	if (ret < 0)
 		return ret;
 
-	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
 				 v3d_priv);
 	if (ret)
@@ -456,7 +448,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 	if (ret == -EINVAL)
 		goto fail;
 
-	ret = drm_gem_fence_array_add(&job->deps, in_fence);
+	ret = drm_sched_job_await_fence(&job->base, in_fence);
 	if (ret)
 		goto fail;
 
@@ -464,7 +456,6 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
 
 	return 0;
 fail:
-	xa_destroy(&job->deps);
 	pm_runtime_put_autosuspend(v3d->drm.dev);
 	return ret;
 }
@@ -609,8 +600,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (bin) {
 		v3d_push_job(&bin->base);
 
-		ret = drm_gem_fence_array_add(&render->base.deps,
-					      dma_fence_get(bin->base.done_fence));
+		ret = drm_sched_job_await_fence(&render->base.base,
+						dma_fence_get(bin->base.done_fence));
 		if (ret)
 			goto fail_unreserve;
 	}
@@ -620,7 +611,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (clean_job) {
 		struct dma_fence *render_fence =
 			dma_fence_get(render->base.done_fence);
-		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+		ret = drm_sched_job_await_fence(&clean_job->base, render_fence);
 		if (ret)
 			goto fail_unreserve;
 		v3d_push_job(clean_job);
@@ -810,8 +801,8 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&v3d->sched_lock);
 	v3d_push_job(&job->base);
 
-	ret = drm_gem_fence_array_add(&clean_job->deps,
-				      dma_fence_get(job->base.done_fence));
+	ret = drm_sched_job_await_fence(&clean_job->base,
+					dma_fence_get(job->base.done_fence));
 	if (ret)
 		goto fail_unreserve;
 
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 8992480c88fa..3af6a09764c7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -13,7 +13,7 @@
  * jobs when bulk background jobs are queued up, we submit a new job
  * to the HW only when it has completed the last one, instead of
  * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_await_fence() to manage the dependency between bin and
  * render, instead of having the clients submit jobs using the HW's
  * semaphores to interlock between them.
  */
@@ -63,28 +63,6 @@ v3d_job_free(struct drm_sched_job *sched_job)
 	v3d_job_put(job);
 }
 
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
-		   struct drm_sched_entity *s_entity)
-{
-	struct v3d_job *job = to_v3d_job(sched_job);
-
-	/* XXX: Wait on a fence for switching the GMP if necessary,
-	 * and then do so.
-	 */
-
-	if (!xa_empty(&job->deps))
-		return xa_erase(&job->deps, job->last_dep++);
-
-	return NULL;
-}
-
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 {
 	struct v3d_bin_job *job = to_bin_job(sched_job);
@@ -357,35 +335,30 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
 }
 
 static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_bin_job_run,
 	.timedout_job = v3d_bin_job_timedout,
 	.free_job = v3d_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_render_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_render_job_run,
 	.timedout_job = v3d_render_job_timedout,
 	.free_job = v3d_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_tfu_job_run,
 	.timedout_job = v3d_generic_job_timedout,
 	.free_job = v3d_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_csd_job_run,
 	.timedout_job = v3d_csd_job_timedout,
 	.free_job = v3d_job_free
 };
 
 static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
-	.dependency = v3d_job_dependency,
 	.run_job = v3d_cache_clean_job_run,
 	.timedout_job = v3d_generic_job_timedout,
 	.free_job = v3d_job_free
-- 
2.32.0.rc2


Thread overview: 57+ messages
2021-06-24 14:00 [PATCH 00/11] drm/scheduler dependency tracking Daniel Vetter
2021-06-24 14:00 ` [PATCH 01/11] drm/sched: Split drm_sched_job_init Daniel Vetter
2021-06-24 14:32   ` Steven Price
2021-06-24 17:29   ` Christian König
2021-06-24 17:37     ` Daniel Vetter
2021-06-24 17:39       ` Christian König
2021-06-24 18:22         ` Daniel Vetter
2021-06-24 20:45   ` [PATCH] " Daniel Vetter
2021-06-24 21:00     ` Emma Anholt
2021-06-24 21:30       ` Daniel Vetter
2021-06-24 14:00 ` [PATCH 02/11] drm/sched: Add dependency tracking Daniel Vetter
2021-06-24 14:32   ` Steven Price
2021-06-24 14:39   ` Lucas Stach
2021-06-24 15:26     ` Daniel Vetter
2021-06-24 16:59     ` Christian König
2021-06-24 14:00 ` [PATCH 03/11] drm/sched: drop entity parameter from drm_sched_push_job Daniel Vetter
2021-06-24 14:32   ` Steven Price
2021-06-24 14:00 ` [PATCH 04/11] drm/panfrost: use scheduler dependency tracking Daniel Vetter
2021-06-24 14:32   ` Steven Price
2021-06-24 14:00 ` [PATCH 05/11] drm/lima: " Daniel Vetter
2021-06-24 14:00 ` [PATCH 06/11] drm/v3d: Move drm_sched_job_init to v3d_job_init Daniel Vetter
2021-06-24 16:59   ` Emma Anholt
2021-06-24 17:24     ` Daniel Vetter
2021-06-24 20:45   ` [PATCH] " Daniel Vetter
2021-06-24 20:49     ` Emma Anholt
2021-06-24 14:00 ` Daniel Vetter [this message]
2021-06-24 14:00 ` [PATCH 08/11] drm/etnaviv: Use scheduler dependency handling Daniel Vetter
2021-06-24 14:00 ` [PATCH 09/11] drm/gem: Delete gem array fencing helpers Daniel Vetter
2021-06-24 14:00 ` [PATCH 10/11] drm/scheduler: Don't store self-dependencies Daniel Vetter
2021-06-24 14:42   ` Lucas Stach
2021-06-24 17:03   ` Christian König
2021-06-24 17:29     ` Daniel Vetter
2021-06-24 17:38       ` Christian König
2021-06-24 17:43         ` Daniel Vetter
2021-06-24 17:56           ` Christian König
2021-06-24 18:21             ` Daniel Vetter
2021-06-24 14:00 ` [PATCH 11/11] drm/sched: Check locking in drm_sched_job_await_implicit Daniel Vetter
