All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
@ 2020-06-09 10:57 Marek Olšák
  2020-06-10  6:29 ` Zhou, David(ChunMing)
  0 siblings, 1 reply; 8+ messages in thread
From: Marek Olšák @ 2020-06-09 10:57 UTC (permalink / raw)
  To: amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 223 bytes --]

Hi,

This enables a full pipeline sync for implicit sync. It's Christian's patch
with the driver version bumped. With this, user mode drivers don't have to
wait for idle at the end of gfx IBs.

Any concerns?

Thanks,
Marek

[-- Attachment #1.2: Type: text/html, Size: 362 bytes --]

[-- Attachment #2: 0001-drm-amdgpu-remove-distinction-between-explicit-and-i.patch --]
[-- Type: text/x-patch, Size: 14013 bytes --]

From 2216e2db0994f1fdeb74353bd669f8981280188e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 27 May 2020 10:31:08 +0200
Subject: [PATCH] drm/amdgpu: remove distinction between explicit and implicit
 sync (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

According to Marek a pipeline sync should be inserted for implicit syncs as well.

v2: bump the driver version

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Marek Olšák <marek.olsak@amd.com>
Signed-off-by: Marek Olšák <marek.olsak@amd.com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  8 ++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c        |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       | 12 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c       | 15 ++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      | 31 ++++++-------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h      |  6 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c   |  2 +-
 9 files changed, 33 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 68e6e1bc8f3a..c408936e8f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -395,7 +395,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 	if (ret)
 		return ret;
 
-	return amdgpu_sync_fence(sync, vm->last_update, false);
+	return amdgpu_sync_fence(sync, vm->last_update);
 }
 
 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -785,7 +785,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
-	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
+	amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
 	return 0;
 }
@@ -804,7 +804,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 		return ret;
 	}
 
-	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
+	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -2102,7 +2102,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
 			goto validate_map_fail;
 		}
-		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
+		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
 		if (ret) {
 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
 			goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 19070226a945..ffbcaf4bfb8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -992,7 +992,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 			dma_fence_put(old);
 		}
 
-		r = amdgpu_sync_fence(&p->job->sync, fence, true);
+		r = amdgpu_sync_fence(&p->job->sync, fence);
 		dma_fence_put(fence);
 		if (r)
 			return r;
@@ -1014,7 +1014,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
 		return r;
 	}
 
-	r = amdgpu_sync_fence(&p->job->sync, fence, true);
+	r = amdgpu_sync_fence(&p->job->sync, fence);
 	dma_fence_put(fence);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 499ddb0c75d2..a4576a81794a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -87,9 +87,10 @@
  * - 3.36.0 - Allow reading more status registers on si/cik
  * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
  * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
+ * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	38
+#define KMS_DRIVER_MINOR	39
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b91853fd66d3..4ffc32b78745 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -178,7 +178,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
-	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
 	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index fe92dcd94d4a..267fa45ddb66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	int r;
 
 	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(sync, ring->vmid_wait, false);
+		return amdgpu_sync_fence(sync, ring->vmid_wait);
 
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
@@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 			return -ENOMEM;
 		}
 
-		r = amdgpu_sync_fence(sync, &array->base, false);
+		r = amdgpu_sync_fence(sync, &array->base);
 		dma_fence_put(ring->vmid_wait);
 		ring->vmid_wait = &array->base;
 		return r;
@@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			r = amdgpu_sync_fence(sync, tmp, false);
+			r = amdgpu_sync_fence(sync, tmp);
 			return r;
 		}
 		needs_flush = true;
@@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	* user of the VMID.
 	*/
-	r = amdgpu_sync_fence(&(*id)->active, fence, false);
+	r = amdgpu_sync_fence(&(*id)->active, fence);
 	if (r)
 		return r;
 
@@ -375,7 +375,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		/* Good, we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(&(*id)->active, fence, false);
+		r = amdgpu_sync_fence(&(*id)->active, fence);
 		if (r)
 			return r;
 
@@ -435,7 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			id = idle;
 
 			/* Remember this submission as user of the VMID */
-			r = amdgpu_sync_fence(&id->active, fence, false);
+			r = amdgpu_sync_fence(&id->active, fence);
 			if (r)
 				goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 47207188c569..2975c4a6e581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -183,16 +183,13 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
-	bool explicit = false;
 	int r;
 
-	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
-	if (fence && explicit) {
-		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(&job->sched_sync, fence, false);
-			if (r)
-				DRM_ERROR("Error adding fence (%d)\n", r);
-		}
+	fence = amdgpu_sync_get_fence(&job->sync);
+	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+		r = amdgpu_sync_fence(&job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
 	while (fence == NULL && vm && !job->vmid) {
@@ -202,7 +199,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-		fence = amdgpu_sync_get_fence(&job->sync, NULL);
+		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index b87ca171986a..b9bfbf4b6801 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -35,7 +35,6 @@
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
 	struct dma_fence	*fence;
-	bool	explicit;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -129,8 +128,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
-				  bool explicit)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,10 +137,6 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
 			continue;
 
 		amdgpu_sync_keep_later(&e->fence, f);
-
-		/* Preserve eplicit flag to not loose pipe line sync */
-		e->explicit |= explicit;
-
 		return true;
 	}
 	return false;
@@ -153,27 +147,23 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
  *
  * @sync: sync object to add fence to
  * @f: fence to sync to
- * @explicit: if this is an explicit dependency
  *
  * Add the fence to the sync object.
  */
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
-		      bool explicit)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
 	if (!f)
 		return 0;
 
-	if (amdgpu_sync_add_later(sync, f, explicit))
+	if (amdgpu_sync_add_later(sync, f))
 		return 0;
 
 	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
 
-	e->explicit = explicit;
-
 	hash_add(sync->fences, &e->node, f->context);
 	e->fence = dma_fence_get(f);
 	return 0;
@@ -194,7 +184,7 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
 		return 0;
 
 	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
-	return amdgpu_sync_fence(sync, fence, false);
+	return amdgpu_sync_fence(sync, fence);
 }
 
 /**
@@ -221,7 +211,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 
 	/* always sync to the exclusive fence */
 	f = dma_resv_get_excl(resv);
-	r = amdgpu_sync_fence(sync, f, false);
+	r = amdgpu_sync_fence(sync, f);
 
 	flist = dma_resv_get_list(resv);
 	if (!flist || r)
@@ -237,7 +227,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 
 		/* Always sync to moves, no matter what */
 		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
-			r = amdgpu_sync_fence(sync, f, false);
+			r = amdgpu_sync_fence(sync, f);
 			if (r)
 				break;
 		}
@@ -275,7 +265,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 			continue;
 		}
 
-		r = amdgpu_sync_fence(sync, f, false);
+		r = amdgpu_sync_fence(sync, f);
 		if (r)
 			break;
 	}
@@ -330,11 +320,10 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  * amdgpu_sync_get_fence - get the next fence from the sync object
  *
  * @sync: sync object to use
- * @explicit: true if the next fence is explicit
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -343,8 +332,6 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 
 		f = e->fence;
-		if (explicit)
-			*explicit = e->explicit;
 
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
@@ -376,7 +363,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
 	hash_for_each_safe(source->fences, i, tmp, e, node) {
 		f = e->fence;
 		if (!dma_fence_is_signaled(f)) {
-			r = amdgpu_sync_fence(clone, f, e->explicit);
+			r = amdgpu_sync_fence(clone, f);
 			if (r)
 				return r;
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index cfbe5788b8b9..7c0fe20c470d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -47,16 +47,14 @@ struct amdgpu_sync {
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
-		      bool explicit);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
 int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
 int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
 		     void *owner);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
-					bool *explicit);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 8d9c6feba660..28bdfb3ac33d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -208,7 +208,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	int r;
 
 	/* Wait for PD/PT moves to be completed */
-	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
 	if (r)
 		return r;
 
-- 
2.17.1


[-- Attachment #3: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* RE: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-09 10:57 [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2) Marek Olšák
@ 2020-06-10  6:29 ` Zhou, David(ChunMing)
  2020-06-10  7:41   ` Christian König
  0 siblings, 1 reply; 8+ messages in thread
From: Zhou, David(ChunMing) @ 2020-06-10  6:29 UTC (permalink / raw)
  To: Marek Olšák, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 871 bytes --]

[AMD Official Use Only - Internal Distribution Only]

Not sure if this is the right direction, I think usermode wants all synchronizations to be explicit. Implicit sync often confuses people who don't know its history. I remember Jason from Intel is driving explicit synchronization through the Linux ecosystem, which even removes implicit sync of shared buffers.

-David

From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Marek Olšák
Sent: Tuesday, June 9, 2020 6:58 PM
To: amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
Subject: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)

Hi,

This enables a full pipeline sync for implicit sync. It's Christian's patch with the driver version bumped. With this, user mode drivers don't have to wait for idle at the end of gfx IBs.

Any concerns?

Thanks,
Marek

[-- Attachment #1.2: Type: text/html, Size: 3720 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-10  6:29 ` Zhou, David(ChunMing)
@ 2020-06-10  7:41   ` Christian König
  2020-06-10  7:55     ` Chunming Zhou
  0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2020-06-10  7:41 UTC (permalink / raw)
  To: Zhou, David(ChunMing), Marek Olšák, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 1848 bytes --]

That's true, but for now we are stuck with the implicit sync for quite a 
number of use cases.

My problem is rather that we already tried this and it backfired 
immediately.

I do remember that it was your patch who introduced the pipeline sync 
flag handling and I warned that this could be problematic. You then came 
back with a QA result saying that this is indeed causing a huge 
performance drop in one test case and we need to do something else. 
Together we then came up with the different handling between implicit 
and explicit sync.

But I can't find that stupid mail thread any more. I knew that it was a 
couple of years ago when we started with the explicit sync for Vulkan.

Christian.

Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>
> [AMD Official Use Only - Internal Distribution Only]
>
> Not sue if this is right direction, I think usermode wants all 
> synchronizations to be explicit. Implicit sync often confuses people 
> who don’t know its history. I remember Jason from Intel  is driving 
> explicit synchronization through the Linux ecosystem, which even 
> removes implicit sync of shared buffer.
>
> -David
>
> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of 
> *Marek Olšák
> *Sent:* Tuesday, June 9, 2020 6:58 PM
> *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
> *Subject:* [PATCH] drm/amdgpu: remove distinction between explicit and 
> implicit sync (v2)
>
> Hi,
>
> This enables a full pipeline sync for implicit sync. It's Christian's 
> patch with the driver version bumped. With this, user mode drivers 
> don't have to wait for idle at the end of gfx IBs.
>
> Any concerns?
>
> Thanks,
>
> Marek
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[-- Attachment #1.2: Type: text/html, Size: 5733 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-10  7:41   ` Christian König
@ 2020-06-10  7:55     ` Chunming Zhou
  2020-06-11  7:19       ` Marek Olšák
  0 siblings, 1 reply; 8+ messages in thread
From: Chunming Zhou @ 2020-06-10  7:55 UTC (permalink / raw)
  To: christian.koenig, Zhou, David(ChunMing),
	Marek Olšák, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 2468 bytes --]


在 2020/6/10 15:41, Christian König 写道:
> That's true, but for now we are stuck with the implicit sync for quite 
> a number of use cases.
>
> My problem is rather that we already tried this and it backfired 
> immediately.
>
> I do remember that it was your patch who introduced the pipeline sync 
> flag handling and I warned that this could be problematic. You then 
> came back with a QA result saying that this is indeed causing a huge 
> performance drop in one test case and we need to do something else. 
> Together we then came up with the different handling between implicit 
> and explicit sync.

Isn't the pipeline sync flag to fix some issue because of parallel execution 
between jobs in one pipeline?  I really don't have this memory in mind 
why that's related to this. Or do you mean the extra sync hides many other 
potential issues?

Anyway, when I go through Vulkan WSI code, the synchronization isn't so 
smooth between OS window system. And when I saw Jason drives explicit 
sync through the whole Linux ecosystem like Android window system does, 
I feel that's really a good direction.

-David

>
> But I can't find that stupid mail thread any more. I knew that it was 
> a couple of years ago when we started with the explicit sync for Vulkan.
>
> Christian.
>
> Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>>
>> [AMD Official Use Only - Internal Distribution Only]
>>
>> Not sue if this is right direction, I think usermode wants all 
>> synchronizations to be explicit. Implicit sync often confuses people 
>> who don’t know its history. I remember Jason from Intel  is driving 
>> explicit synchronization through the Linux ecosystem, which even 
>> removes implicit sync of shared buffer.
>>
>> -David
>>
>> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of 
>> *Marek Olšák
>> *Sent:* Tuesday, June 9, 2020 6:58 PM
>> *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
>> *Subject:* [PATCH] drm/amdgpu: remove distinction between explicit 
>> and implicit sync (v2)
>>
>> Hi,
>>
>> This enables a full pipeline sync for implicit sync. It's Christian's 
>> patch with the driver version bumped. With this, user mode drivers 
>> don't have to wait for idle at the end of gfx IBs.
>>
>> Any concerns?
>>
>> Thanks,
>>
>> Marek
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

[-- Attachment #1.2: Type: text/html, Size: 7366 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-10  7:55     ` Chunming Zhou
@ 2020-06-11  7:19       ` Marek Olšák
  2020-06-11 12:13         ` Chunming Zhou
  0 siblings, 1 reply; 8+ messages in thread
From: Marek Olšák @ 2020-06-11  7:19 UTC (permalink / raw)
  To: Chunming Zhou
  Cc: Zhou, David(ChunMing), Christian König, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 3223 bytes --]

Hi David,

Explicit sync has nothing to do with this. This is for implicit sync, which
is required by DRI3. This fix allows removing existing inefficiencies from
drivers, so it's a good thing.

Marek

On Wed., Jun. 10, 2020, 03:56 Chunming Zhou, <zhoucm1@amd.com> wrote:

>
> 在 2020/6/10 15:41, Christian König 写道:
>
> That's true, but for now we are stuck with the implicit sync for quite a
> number of use cases.
>
> My problem is rather that we already tried this and it backfired
> immediately.
>
> I do remember that it was your patch who introduced the pipeline sync flag
> handling and I warned that this could be problematic. You then came back
> with a QA result saying that this is indeed causing a huge performance drop
> in one test case and we need to do something else. Together we then came up
> with the different handling between implicit and explicit sync.
>
> Isn't pipeline sync flag to fix some issue because of parralel execution
> between jobs in one pipeline?  I really don't have this memory in mind why
> that's realted to this, Or do you mean extra sync hides many other
> potential issues?
>
> Anyway, when I go through Vulkan WSI code, the synchronization isn't so
> smooth between OS window system. And when I saw Jason drives explicit sync
> through the whole Linux ecosystem like Android window system does, I feel
> that's really a good direction.
>
> -David
>
>
> But I can't find that stupid mail thread any more. I knew that it was a
> couple of years ago when we started with the explicit sync for Vulkan.
>
> Christian.
>
> Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>
> [AMD Official Use Only - Internal Distribution Only]
>
>
>
> Not sue if this is right direction, I think usermode wants all
> synchronizations to be explicit. Implicit sync often confuses people who
> don’t know its history. I remember Jason from Intel  is driving explicit
> synchronization through the Linux ecosystem, which even removes implicit
> sync of shared buffer.
>
>
>
> -David
>
>
>
> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org>
> <amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of *Marek Olšák
> *Sent:* Tuesday, June 9, 2020 6:58 PM
> *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
> <amd-gfx@lists.freedesktop.org>
> *Subject:* [PATCH] drm/amdgpu: remove distinction between explicit and
> implicit sync (v2)
>
>
>
> Hi,
>
>
>
> This enables a full pipeline sync for implicit sync. It's Christian's
> patch with the driver version bumped. With this, user mode drivers don't
> have to wait for idle at the end of gfx IBs.
>
>
>
> Any concerns?
>
>
>
> Thanks,
>
> Marek
>
> _______________________________________________
> amd-gfx mailing listamd-gfx@lists.freedesktop.orghttps://lists.freedesktop.org/mailman/listinfo/amd-gfx <https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=02%7C01%7CDavid1.Zhou%40amd.com%7C47e97eb8e3654708f7ff08d80d11bcf7%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637273717119277965&sdata=EFq2J33HIOiIS3bok2ZcIz%2FhE%2FVs9T0HPy0UFi92Nf4%3D&reserved=0>
>
>
>

[-- Attachment #1.2: Type: text/html, Size: 6010 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-11  7:19       ` Marek Olšák
@ 2020-06-11 12:13         ` Chunming Zhou
  2020-06-12 11:20           ` Christian König
  2020-06-12 22:29           ` Marek Olšák
  0 siblings, 2 replies; 8+ messages in thread
From: Chunming Zhou @ 2020-06-11 12:13 UTC (permalink / raw)
  To: Marek Olšák
  Cc: Zhou, David(ChunMing), Christian König, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 3754 bytes --]

I didn't check the patch details, if it is for existing implicit sync of 
shared buffer, feel free go ahead.

But if you add some description for its usage, that will be more clear 
to others.

-David

在 2020/6/11 15:19, Marek Olšák 写道:
> Hi David,
>
> Explicit sync has nothing to do with this. This is for implicit sync, 
> which is required by DRI3. This fix allows removing existing 
> inefficiencies from drivers, so it's a good thing.
>
> Marek
>
> On Wed., Jun. 10, 2020, 03:56 Chunming Zhou, <zhoucm1@amd.com 
> <mailto:zhoucm1@amd.com>> wrote:
>
>
>     在 2020/6/10 15:41, Christian König 写道:
>>     That's true, but for now we are stuck with the implicit sync for
>>     quite a number of use cases.
>>
>>     My problem is rather that we already tried this and it backfired
>>     immediately.
>>
>>     I do remember that it was your patch who introduced the pipeline
>>     sync flag handling and I warned that this could be problematic.
>>     You then came back with a QA result saying that this is indeed
>>     causing a huge performance drop in one test case and we need to
>>     do something else. Together we then came up with the different
>>     handling between implicit and explicit sync.
>
>     Isn't pipeline sync flag to fix some issue because of parralel
>     execution between jobs in one pipeline?  I really don't have this
>     memory in mind why that's realted to this, Or do you mean extra
>     sync hides many other potential issues?
>
>     Anyway, when I go through Vulkan WSI code, the synchronization
>     isn't so smooth between OS window system. And when I saw Jason
>     drives explicit sync through the whole Linux ecosystem like
>     Android window system does, I feel that's really a good direction.
>
>     -David
>
>>
>>     But I can't find that stupid mail thread any more. I knew that it
>>     was a couple of years ago when we started with the explicit sync
>>     for Vulkan.
>>
>>     Christian.
>>
>>     Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>>>
>>>     [AMD Official Use Only - Internal Distribution Only]
>>>
>>>     Not sue if this is right direction, I think usermode wants all
>>>     synchronizations to be explicit. Implicit sync often confuses
>>>     people who don’t know its history. I remember Jason from Intel
>>>      is driving explicit synchronization through the Linux
>>>     ecosystem, which even removes implicit sync of shared buffer.
>>>
>>>     -David
>>>
>>>     *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org>
>>>     <mailto:amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of
>>>     *Marek Olšák
>>>     *Sent:* Tuesday, June 9, 2020 6:58 PM
>>>     *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
>>>     <mailto:amd-gfx@lists.freedesktop.org>
>>>     *Subject:* [PATCH] drm/amdgpu: remove distinction between
>>>     explicit and implicit sync (v2)
>>>
>>>     Hi,
>>>
>>>     This enables a full pipeline sync for implicit sync. It's
>>>     Christian's patch with the driver version bumped. With this,
>>>     user mode drivers don't have to wait for idle at the end of gfx IBs.
>>>
>>>     Any concerns?
>>>
>>>     Thanks,
>>>
>>>     Marek
>>>
>>>
>>>     _______________________________________________
>>>     amd-gfx mailing list
>>>     amd-gfx@lists.freedesktop.org  <mailto:amd-gfx@lists.freedesktop.org>
>>>     https://lists.freedesktop.org/mailman/listinfo/amd-gfx  <https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=02%7C01%7CDavid1.Zhou%40amd.com%7C0d3096fc043f4443f14e08d80dd7c674%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637274567683552668&sdata=xIHDswGRsdCP%2BE7MRI4nKXdoMgV2LBzFPP46zGpQusk%3D&reserved=0>
>>

[-- Attachment #1.2: Type: text/html, Size: 7706 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-11 12:13         ` Chunming Zhou
@ 2020-06-12 11:20           ` Christian König
  2020-06-12 22:29           ` Marek Olšák
  1 sibling, 0 replies; 8+ messages in thread
From: Christian König @ 2020-06-12 11:20 UTC (permalink / raw)
  To: Chunming Zhou, Marek Olšák
  Cc: Zhou, David(ChunMing), Christian König, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 4331 bytes --]

My only concern is that this becomes UAPI as soon as we increase the 
minor number.

So if we find that this has some negative side effects we can never go 
back again.

But the choice is up to you guys, from my side it is perfectly good to go.

Christian.

Am 11.06.20 um 14:13 schrieb Chunming Zhou:
>
> I didn't check the patch details, if it is for existing implicit sync 
> of shared buffer, feel free go ahead.
>
> But if you add some description for its usage, that will be more clear 
> to others.
>
> -David
>
> 在 2020/6/11 15:19, Marek Olšák 写道:
>> Hi David,
>>
>> Explicit sync has nothing to do with this. This is for implicit sync, 
>> which is required by DRI3. This fix allows removing existing 
>> inefficiencies from drivers, so it's a good thing.
>>
>> Marek
>>
>> On Wed., Jun. 10, 2020, 03:56 Chunming Zhou, <zhoucm1@amd.com 
>> <mailto:zhoucm1@amd.com>> wrote:
>>
>>
>>     在 2020/6/10 15:41, Christian König 写道:
>>>     That's true, but for now we are stuck with the implicit sync for
>>>     quite a number of use cases.
>>>
>>>     My problem is rather that we already tried this and it backfired
>>>     immediately.
>>>
>>>     I do remember that it was your patch who introduced the pipeline
>>>     sync flag handling and I warned that this could be problematic.
>>>     You then came back with a QA result saying that this is indeed
>>>     causing a huge performance drop in one test case and we need to
>>>     do something else. Together we then came up with the different
>>>     handling between implicit and explicit sync.
>>
>>     Isn't the pipeline sync flag to fix some issue because of parallel
>>     execution between jobs in one pipeline?  I really don't have this
>>     memory in mind why that's related to this, or do you mean extra
>>     sync hides many other potential issues?
>>
>>     Anyway, when I go through Vulkan WSI code, the synchronization
>>     isn't so smooth between OS window system. And when I saw Jason
>>     drives explicit sync through the whole Linux ecosystem like
>>     Android window system does, I feel that's really a good direction.
>>
>>     -David
>>
>>>
>>>     But I can't find that stupid mail thread any more. I knew that
>>>     it was a couple of years ago when we started with the explicit
>>>     sync for Vulkan.
>>>
>>>     Christian.
>>>
>>>     Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>>>>
>>>>     [AMD Official Use Only - Internal Distribution Only]
>>>>
>>>>     Not sure if this is the right direction, I think usermode wants all
>>>>     synchronizations to be explicit. Implicit sync often confuses
>>>>     people who don’t know its history. I remember Jason from Intel
>>>>      is driving explicit synchronization through the Linux
>>>>     ecosystem, which even removes implicit sync of shared buffer.
>>>>
>>>>     -David
>>>>
>>>>     *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org>
>>>>     <mailto:amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of
>>>>     *Marek Olšák
>>>>     *Sent:* Tuesday, June 9, 2020 6:58 PM
>>>>     *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
>>>>     <mailto:amd-gfx@lists.freedesktop.org>
>>>>     *Subject:* [PATCH] drm/amdgpu: remove distinction between
>>>>     explicit and implicit sync (v2)
>>>>
>>>>     Hi,
>>>>
>>>>     This enables a full pipeline sync for implicit sync. It's
>>>>     Christian's patch with the driver version bumped. With this,
>>>>     user mode drivers don't have to wait for idle at the end of gfx
>>>>     IBs.
>>>>
>>>>     Any concerns?
>>>>
>>>>     Thanks,
>>>>
>>>>     Marek
>>>>
>>>>
>>>>     _______________________________________________
>>>>     amd-gfx mailing list
>>>>     amd-gfx@lists.freedesktop.org  <mailto:amd-gfx@lists.freedesktop.org>
>>>>     https://lists.freedesktop.org/mailman/listinfo/amd-gfx  <https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=02%7C01%7CDavid1.Zhou%40amd.com%7C0d3096fc043f4443f14e08d80dd7c674%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637274567683552668&sdata=xIHDswGRsdCP%2BE7MRI4nKXdoMgV2LBzFPP46zGpQusk%3D&reserved=0>
>>>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[-- Attachment #1.2: Type: text/html, Size: 9226 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2)
  2020-06-11 12:13         ` Chunming Zhou
  2020-06-12 11:20           ` Christian König
@ 2020-06-12 22:29           ` Marek Olšák
  1 sibling, 0 replies; 8+ messages in thread
From: Marek Olšák @ 2020-06-12 22:29 UTC (permalink / raw)
  To: Chunming Zhou
  Cc: Zhou, David(ChunMing), Christian König, amd-gfx mailing list


[-- Attachment #1.1: Type: text/plain, Size: 4725 bytes --]

The usage is that UMDs will no longer have to wait for idle at the end of
IBs. If you have WAIT_REG_MEM or PS/CS_PARTIAL_FLUSH at the end of IBs, you
can remove that. The responsibility to sync is taken over by the kernel
driver.

This has a potential to increase performance for fullscreen applications,
because the kernel will sync only when the sync is required for
inter-process sharing, which is never for fullscreen apps.

Also if 2 or more windowed apps are rendering, there will be no longer any
sync when switching from one process to the next in the gfx ring. The sync
will only happen before the compositor starts drawing the fullscreen frame.
Therefore, the windowed apps running in parallel should run faster.

If the UMD syncs at the beginning of IBs (common e.g. with DCC fast clear),
there will be no improvement in performance. For any improvement to be
there, UMDs shouldn't sync at the beginning of IBs either, but this may not
always be possible. (a fast color clear needs a sync, while a fast Z/S
clear doesn't)

Marek

On Thu, Jun 11, 2020 at 8:13 AM Chunming Zhou <zhoucm1@amd.com> wrote:

> I didn't check the patch details, if it is for existing implicit sync of
> shared buffer, feel free go ahead.
>
> But if you add some description for its usage, that will be more clear to
> others.
>
> -David
> 在 2020/6/11 15:19, Marek Olšák 写道:
>
> Hi David,
>
> Explicit sync has nothing to do with this. This is for implicit sync,
> which is required by DRI3. This fix allows removing existing inefficiencies
> from drivers, so it's a good thing.
>
> Marek
>
> On Wed., Jun. 10, 2020, 03:56 Chunming Zhou, <zhoucm1@amd.com> wrote:
>
>>
>> 在 2020/6/10 15:41, Christian König 写道:
>>
>> That's true, but for now we are stuck with the implicit sync for quite a
>> number of use cases.
>>
>> My problem is rather that we already tried this and it backfired
>> immediately.
>>
>> I do remember that it was your patch who introduced the pipeline sync
>> flag handling and I warned that this could be problematic. You then came
>> back with a QA result saying that this is indeed causing a huge performance
>> drop in one test case and we need to do something else. Together we then
>> came up with the different handling between implicit and explicit sync.
>>
>> Isn't the pipeline sync flag to fix some issue because of parallel execution
>> between jobs in one pipeline?  I really don't have this memory in mind why
>> that's related to this, or do you mean extra sync hides many other
>> potential issues?
>>
>> Anyway, when I go through Vulkan WSI code, the synchronization isn't so
>> smooth between OS window system. And when I saw Jason drives explicit sync
>> through the whole Linux ecosystem like Android window system does, I feel
>> that's really a good direction.
>>
>> -David
>>
>>
>> But I can't find that stupid mail thread any more. I knew that it was a
>> couple of years ago when we started with the explicit sync for Vulkan.
>>
>> Christian.
>>
>> Am 10.06.20 um 08:29 schrieb Zhou, David(ChunMing):
>>
>> [AMD Official Use Only - Internal Distribution Only]
>>
>>
>>
>> Not sure if this is the right direction, I think usermode wants all
>> synchronizations to be explicit. Implicit sync often confuses people who
>> don’t know its history. I remember Jason from Intel  is driving explicit
>> synchronization through the Linux ecosystem, which even removes implicit
>> sync of shared buffer.
>>
>>
>>
>> -David
>>
>>
>>
>> *From:* amd-gfx <amd-gfx-bounces@lists.freedesktop.org>
>> <amd-gfx-bounces@lists.freedesktop.org> *On Behalf Of *Marek Olšák
>> *Sent:* Tuesday, June 9, 2020 6:58 PM
>> *To:* amd-gfx mailing list <amd-gfx@lists.freedesktop.org>
>> <amd-gfx@lists.freedesktop.org>
>> *Subject:* [PATCH] drm/amdgpu: remove distinction between explicit and
>> implicit sync (v2)
>>
>>
>>
>> Hi,
>>
>>
>>
>> This enables a full pipeline sync for implicit sync. It's Christian's
>> patch with the driver version bumped. With this, user mode drivers don't
>> have to wait for idle at the end of gfx IBs.
>>
>>
>>
>> Any concerns?
>>
>>
>>
>> Thanks,
>>
>> Marek
>>
>> _______________________________________________
>> amd-gfx mailing listamd-gfx@lists.freedesktop.orghttps://lists.freedesktop.org/mailman/listinfo/amd-gfx <https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=02%7C01%7CDavid1.Zhou%40amd.com%7C0d3096fc043f4443f14e08d80dd7c674%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637274567683552668&sdata=xIHDswGRsdCP%2BE7MRI4nKXdoMgV2LBzFPP46zGpQusk%3D&reserved=0>
>>
>>
>>

[-- Attachment #1.2: Type: text/html, Size: 8890 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-06-12 22:30 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-09 10:57 [PATCH] drm/amdgpu: remove distinction between explicit and implicit sync (v2) Marek Olšák
2020-06-10  6:29 ` Zhou, David(ChunMing)
2020-06-10  7:41   ` Christian König
2020-06-10  7:55     ` Chunming Zhou
2020-06-11  7:19       ` Marek Olšák
2020-06-11 12:13         ` Chunming Zhou
2020-06-12 11:20           ` Christian König
2020-06-12 22:29           ` Marek Olšák

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.