From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: DRI Development <dri-devel@lists.freedesktop.org>
Cc: "Intel Graphics Development" <intel-gfx@lists.freedesktop.org>,
	"Daniel Vetter" <daniel.vetter@ffwll.ch>,
	"Emma Anholt" <emma@anholt.net>,
	"Steven Price" <steven.price@arm.com>,
	"Daniel Vetter" <daniel.vetter@intel.com>,
	"Rob Herring" <robh@kernel.org>,
	"Tomeu Vizoso" <tomeu.vizoso@collabora.com>,
	"Alyssa Rosenzweig" <alyssa.rosenzweig@collabora.com>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Christian König" <christian.koenig@amd.com>,
	linux-media@vger.kernel.org, linaro-mm-sig@lists.linaro.org
Subject: [PATCH v5 07/20] drm/panfrost: use scheduler dependency tracking
Date: Thu,  5 Aug 2021 12:46:52 +0200	[thread overview]
Message-ID: <20210805104705.862416-8-daniel.vetter@ffwll.ch> (raw)
In-Reply-To: <20210805104705.862416-1-daniel.vetter@ffwll.ch>

This just deletes some driver code that is now shared scheduler code.

Note that thanks to the split into drm_sched_job_init/arm we can now
easily pull the _init() part out from under the submission lock, way
ahead to where we're adding the sync file in-fences as dependencies.
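
Below is a minimal sketch (not the actual panfrost code) of the submit
flow this enables, assuming the drm_sched_job_* helpers added earlier in
this series and the single-argument drm_sched_entity_push_job() from
patch 5; the example_* names and the standalone mutex are made up for
illustration:

#include <linux/dma-fence.h>
#include <linux/mutex.h>

#include <drm/gpu_scheduler.h>

/* Hypothetical driver job wrapping the scheduler job, like panfrost does. */
struct example_job {
	struct drm_sched_job base;
};

static DEFINE_MUTEX(example_sched_lock);	/* stand-in for pfdev->sched_lock */

static int example_submit(struct example_job *job,
			  struct drm_sched_entity *entity,
			  struct dma_fence *in_fence)
{
	int ret;

	/* _init() can run early, with no submission lock held. */
	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret)
		return ret;

	/* Explicit sync file in-fences become scheduler dependencies. */
	ret = drm_sched_job_add_dependency(&job->base, in_fence);
	if (ret)
		goto err_cleanup;

	/* Only arming and pushing need to happen under the lock. */
	mutex_lock(&example_sched_lock);
	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);
	mutex_unlock(&example_sched_lock);

	return 0;

err_cleanup:
	/* The job was initialized but never armed; cleanup handles that. */
	drm_sched_job_cleanup(&job->base);
	return ret;
}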

v2: Correctly clean up the partially set up job, now that job_init()
and job_arm() are apart (Emma).

v3: Rebased over renamed functions for adding dependencies

Acked-by: Emma Anholt <emma@anholt.net>
Reviewed-by: Steven Price <steven.price@arm.com> (v3)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: Emma Anholt <emma@anholt.net>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 16 ++++++++---
 drivers/gpu/drm/panfrost/panfrost_job.c | 38 ++++---------------------
 drivers/gpu/drm/panfrost/panfrost_job.h |  5 +---
 3 files changed, 18 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ffaef5ec5ff..16212b6b202e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
 		if (ret)
 			goto fail;
 
-		ret = drm_gem_fence_array_add(&job->deps, fence);
+		ret = drm_sched_job_add_dependency(&job->base, fence);
 
 		if (ret)
 			goto fail;
@@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 	struct drm_panfrost_submit *args = data;
 	struct drm_syncobj *sync_out = NULL;
 	struct panfrost_job *job;
-	int ret = 0;
+	int ret = 0, slot;
 
 	if (!args->jc)
 		return -EINVAL;
@@ -258,14 +258,20 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 
 	kref_init(&job->refcount);
 
-	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
 	job->pfdev = pfdev;
 	job->jc = args->jc;
 	job->requirements = args->requirements;
 	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
 	job->file_priv = file->driver_priv;
 
+	slot = panfrost_job_get_slot(job);
+
+	ret = drm_sched_job_init(&job->base,
+				 &job->file_priv->sched_entity[slot],
+				 NULL);
+	if (ret)
+		goto fail_job_put;
+
 	ret = panfrost_copy_in_sync(dev, file, args, job);
 	if (ret)
 		goto fail_job;
@@ -283,6 +289,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 		drm_syncobj_replace_fence(sync_out, job->render_done_fence);
 
 fail_job:
+	drm_sched_job_cleanup(&job->base);
+fail_job_put:
 	panfrost_job_put(job);
 fail_out_sync:
 	if (sync_out)
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 4bc962763e1f..a98f507dc779 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
 	return &fence->base;
 }
 
-static int panfrost_job_get_slot(struct panfrost_job *job)
+int panfrost_job_get_slot(struct panfrost_job *job)
 {
 	/* JS0: fragment jobs.
 	 * JS1: vertex/tiler jobs
@@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 
 static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
 					  int bo_count,
-					  struct xarray *deps)
+					  struct drm_sched_job *job)
 {
 	int i, ret;
 
 	for (i = 0; i < bo_count; i++) {
 		/* panfrost always uses write mode in its current uapi */
-		ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
+		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+							      true);
 		if (ret)
 			return ret;
 	}
@@ -269,31 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 int panfrost_job_push(struct panfrost_job *job)
 {
 	struct panfrost_device *pfdev = job->pfdev;
-	int slot = panfrost_job_get_slot(job);
-	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
 	struct ww_acquire_ctx acquire_ctx;
 	int ret = 0;
 
-
 	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
 					    &acquire_ctx);
 	if (ret)
 		return ret;
 
 	mutex_lock(&pfdev->sched_lock);
-
-	ret = drm_sched_job_init(&job->base, entity, NULL);
-	if (ret) {
-		mutex_unlock(&pfdev->sched_lock);
-		goto unlock;
-	}
-
 	drm_sched_job_arm(&job->base);
 
 	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
 
 	ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
-					     &job->deps);
+					     &job->base);
 	if (ret) {
 		mutex_unlock(&pfdev->sched_lock);
 		goto unlock;
@@ -318,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
 {
 	struct panfrost_job *job = container_of(ref, struct panfrost_job,
 						refcount);
-	struct dma_fence *fence;
-	unsigned long index;
 	unsigned int i;
 
-	xa_for_each(&job->deps, index, fence) {
-		dma_fence_put(fence);
-	}
-	xa_destroy(&job->deps);
-
 	dma_fence_put(job->done_fence);
 	dma_fence_put(job->render_done_fence);
 
@@ -365,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
 	panfrost_job_put(job);
 }
 
-static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
-						 struct drm_sched_entity *s_entity)
-{
-	struct panfrost_job *job = to_panfrost_job(sched_job);
-
-	if (!xa_empty(&job->deps))
-		return xa_erase(&job->deps, job->last_dep++);
-
-	return NULL;
-}
-
 static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
 {
 	struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -765,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
 }
 
 static const struct drm_sched_backend_ops panfrost_sched_ops = {
-	.dependency = panfrost_job_dependency,
 	.run_job = panfrost_job_run,
 	.timedout_job = panfrost_job_timedout,
 	.free_job = panfrost_job_free
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 82306a03b57e..77e6d0e6f612 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -19,10 +19,6 @@ struct panfrost_job {
 	struct panfrost_device *pfdev;
 	struct panfrost_file_priv *file_priv;
 
-	/* Contains both explicit and implicit fences */
-	struct xarray deps;
-	unsigned long last_dep;
-
 	/* Fence to be signaled by IRQ handler when the job is complete. */
 	struct dma_fence *done_fence;
 
@@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
 void panfrost_job_fini(struct panfrost_device *pfdev);
 int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_get_slot(struct panfrost_job *job);
 int panfrost_job_push(struct panfrost_job *job);
 void panfrost_job_put(struct panfrost_job *job);
 void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
-- 
2.32.0

