* [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
@ 2018-01-31 15:47 Christian König
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

To guarantee fairness between processes, grab the reserved VMID only when
there is an idle one.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index c13cf7e79b2e..7a3d0de7425d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences) {
 		mutex_unlock(&id_mgr->lock);
@@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 	kfree(fences);
 
+	if (vm->reserved_vmid[vmhub]) {
+		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
+						     fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
+
 	job->vm_needs_flush = vm->use_cpu_for_update;
 	/* Check if we can use a VMID already assigned to this VM */
 	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
-- 
2.14.1
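For orientation, a rough sketch of the order of operations in amdgpu_vmid_grab()
after this patch. This is a simplified view, not part of the patch itself; the
idle-VMID search and the reuse path are elided:

	mutex_lock(&id_mgr->lock);

	/* First look for an idle VMID. If none is idle, a wait fence is
	 * added to the sync object and the function returns, regardless of
	 * whether this VM owns a reserved VMID, so every process waits for
	 * an idle VMID the same way.
	 */
	/* ... idle VMID search, unchanged by this patch ... */

	/* Only once an idle VMID is available fall back to the per-VM
	 * reserved VMID.
	 */
	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
						     fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}

	/* ... normal VMID reuse path continues as before ... */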


* [PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-2-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 3/8] drm/amdgpu: make VMID owner non-atomic Christian König
                     ` (6 subsequent siblings)
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

No functional change, but makes it easier to maintain the code.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 116 +++++++++++++++++++-------------
 1 file changed, 69 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 7a3d0de7425d..fbe958f7cb5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -182,6 +182,72 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 		atomic_read(&adev->gpu_reset_counter);
 }
 
+/**
+ * amdgpu_vm_grab_idle - grab idle VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @idle: resulting idle VMID
+ *
+ * Try to find an idle VMID, if none is idle add a fence to wait to the sync
+ * object. Returns -ENOMEM when we are out of memory.
+ */
+static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
+				 struct amdgpu_ring *ring,
+				 struct amdgpu_sync *sync,
+				 struct amdgpu_vmid **idle)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct dma_fence **fences;
+	unsigned i;
+	int r;
+
+	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+	if (!fences)
+		return -ENOMEM;
+
+	/* Check if we have an idle VMID */
+	i = 0;
+	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+		if (!fences[i])
+			break;
+		++i;
+	}
+
+	/* If we can't find a idle VMID to use, wait till one becomes available */
+	if (&(*idle)->list == &id_mgr->ids_lru) {
+		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+		struct dma_fence_array *array;
+		unsigned j;
+
+		*idle = NULL;
+		for (j = 0; j < i; ++j)
+			dma_fence_get(fences[j]);
+
+		array = dma_fence_array_create(i, fences, fence_context,
+					       seqno, true);
+		if (!array) {
+			for (j = 0; j < i; ++j)
+				dma_fence_put(fences[j]);
+			kfree(fences);
+			return -ENOMEM;
+		}
+
+		r = amdgpu_sync_fence(adev, sync, &array->base, false);
+		dma_fence_put(&array->base);
+		return r;
+
+	}
+	kfree(fences);
+
+	return 0;
+}
+
 /* idr_mgr->lock must be held */
 static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 					    struct amdgpu_ring *ring,
@@ -263,56 +329,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vmid *id, *idle;
-	struct dma_fence **fences;
-	unsigned i;
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-	if (!fences) {
-		mutex_unlock(&id_mgr->lock);
-		return -ENOMEM;
-	}
-	/* Check if we have an idle VMID */
-	i = 0;
-	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
-		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
-		if (!fences[i])
-			break;
-		++i;
-	}
-
-	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&idle->list == &id_mgr->ids_lru) {
-		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct dma_fence_array *array;
-		unsigned j;
-
-		for (j = 0; j < i; ++j)
-			dma_fence_get(fences[j]);
-
-		array = dma_fence_array_create(i, fences, fence_context,
-					   seqno, true);
-		if (!array) {
-			for (j = 0; j < i; ++j)
-				dma_fence_put(fences[j]);
-			kfree(fences);
-			r = -ENOMEM;
-			goto error;
-		}
-
-
-		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
-		dma_fence_put(&array->base);
-		if (r)
-			goto error;
-
-		mutex_unlock(&id_mgr->lock);
-		return 0;
-
-	}
-	kfree(fences);
+	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+	if (r || !idle)
+		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
 		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
-- 
2.14.1


* [PATCH 3/8] drm/amdgpu: make VMID owner non-atomic
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function Christian König
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-3-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 4/8] drm/amdgpu: stop checking GPU reset counter during VMID grab Christian König
                     ` (5 subsequent siblings)
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

The variable is protected by the VMID mutex anyway.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 10 +++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index fbe958f7cb5b..8374fe870e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -267,7 +267,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 
 	flushed  = id->flushed_updates;
 	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
-	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+	    (id->owner != vm->entity.fence_context) ||
 	    (job->vm_pd_addr != id->pd_gpu_addr) ||
 	    (updates && (!flushed || updates->context != flushed->context ||
 			dma_fence_is_later(updates, flushed))) ||
@@ -296,7 +296,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 		id->flushed_updates = dma_fence_get(updates);
 	}
 	id->pd_gpu_addr = job->vm_pd_addr;
-	atomic64_set(&id->owner, vm->entity.fence_context);
+	id->owner = vm->entity.fence_context;
 	job->vm_needs_flush = needs_flush;
 	if (needs_flush) {
 		dma_fence_put(id->last_flush);
@@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (amdgpu_vmid_had_gpu_reset(adev, id))
 			continue;
 
-		if (atomic64_read(&id->owner) != vm->entity.fence_context)
+		if (id->owner != vm->entity.fence_context)
 			continue;
 
 		if (job->vm_pd_addr != id->pd_gpu_addr)
@@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = job->vm_pd_addr;
 	dma_fence_put(id->flushed_updates);
 	id->flushed_updates = dma_fence_get(updates);
-	atomic64_set(&id->owner, vm->entity.fence_context);
+	id->owner = vm->entity.fence_context;
 
 needs_flush:
 	job->vm_needs_flush = true;
@@ -482,7 +482,7 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *id = &id_mgr->ids[vmid];
 
-	atomic64_set(&id->owner, 0);
+	id->owner = 0;
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 38f37c16fc5e..20d4eca6cd6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -43,7 +43,7 @@ struct amdgpu_vmid {
 	struct list_head	list;
 	struct amdgpu_sync	active;
 	struct dma_fence	*last_flush;
-	atomic64_t		owner;
+	uint64_t		owner;
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-- 
2.14.1
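The claim here is that, in the VMID grab paths, id->owner is only read and
written while id_mgr->lock is held, so the atomic64 wrapper adds nothing.
Abridged sketch of those paths (not a complete listing):

	mutex_lock(&id_mgr->lock);

	/* amdgpu_vmid_grab_reserved_locked() and the LRU reuse loop both
	 * run with the mutex held, so plain reads and writes are safe:
	 */
	if (id->owner != vm->entity.fence_context)	/* read under the lock */
		continue;				/* (inside the reuse loop) */
	/* ... */
	id->owner = vm->entity.fence_context;		/* write under the lock */

	mutex_unlock(&id_mgr->lock);

(As the review further down in the thread notes, amdgpu_vmid_reset() is the
exception and would need the same lock.)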


* [PATCH 4/8] drm/amdgpu: stop checking GPU reset counter during VMID grab
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function Christian König
  2018-01-31 15:47   ` [PATCH 3/8] drm/amdgpu: make VMID owner non-atomic Christian König
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 5/8] drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved Christian König
                     ` (4 subsequent siblings)
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

We do this later on when we flush the VMID anyway.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 8374fe870e8c..2cb4e1750d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -266,8 +266,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	bool needs_flush = vm->use_cpu_for_update;
 
 	flushed  = id->flushed_updates;
-	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
-	    (id->owner != vm->entity.fence_context) ||
+	if ((id->owner != vm->entity.fence_context) ||
 	    (job->vm_pd_addr != id->pd_gpu_addr) ||
 	    (updates && (!flushed || updates->context != flushed->context ||
 			dma_fence_is_later(updates, flushed))) ||
@@ -350,9 +349,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		bool needs_flush = vm->use_cpu_for_update;
 
 		/* Check all the prerequisites to using this VMID */
-		if (amdgpu_vmid_had_gpu_reset(adev, id))
-			continue;
-
 		if (id->owner != vm->entity.fence_context)
 			continue;
 
-- 
2.14.1


* [PATCH 5/8] drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2018-01-31 15:47   ` [PATCH 4/8] drm/amdgpu: stop checking GPU reset counter during VMID grab Christian König
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-5-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 6/8] drm/amdgpu: move reusing VMIDs into separate function Christian König
                     ` (3 subsequent siblings)
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Drop the "_locked" from the name, cleanup and simplify the logic a bit.
Add missing comments.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 ++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 2cb4e1750d10..86d012b21554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -248,12 +248,22 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	return 0;
 }
 
-/* idr_mgr->lock must be held */
-static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
-					    struct amdgpu_ring *ring,
-					    struct amdgpu_sync *sync,
-					    struct dma_fence *fence,
-					    struct amdgpu_job *job)
+/**
+ * amdgpu_vm_grab_reserved - try to assign reserved VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Try to assign a reserved VMID.
+ */
+static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+				     struct amdgpu_ring *ring,
+				     struct amdgpu_sync *sync,
+				     struct dma_fence *fence,
+				     struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -261,18 +271,21 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence *updates = sync->last_vm_update;
-	int r = 0;
-	struct dma_fence *flushed, *tmp;
 	bool needs_flush = vm->use_cpu_for_update;
+	int r = 0;
+
+	if (updates && id->flushed_updates &&
+	    updates->context == id->flushed_updates->context &&
+	    !dma_fence_is_later(updates, id->flushed_updates))
+	    updates = NULL;
+
+	if (id->owner != vm->entity.fence_context ||
+	    job->vm_pd_addr != id->pd_gpu_addr ||
+	    updates || !id->last_flush ||
+	    (id->last_flush->context != fence_context &&
+	     !dma_fence_is_signaled(id->last_flush))) {
+		struct dma_fence *tmp;
 
-	flushed  = id->flushed_updates;
-	if ((id->owner != vm->entity.fence_context) ||
-	    (job->vm_pd_addr != id->pd_gpu_addr) ||
-	    (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) ||
-	    (!id->last_flush || (id->last_flush->context != fence_context &&
-				 !dma_fence_is_signaled(id->last_flush)))) {
-		needs_flush = true;
 		/* to prevent one context starved by another context */
 		id->pd_gpu_addr = 0;
 		tmp = amdgpu_sync_peek_fence(&id->active, ring);
@@ -280,6 +293,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 			r = amdgpu_sync_fence(adev, sync, tmp, false);
 			return r;
 		}
+		needs_flush = true;
 	}
 
 	/* Good we can use this VMID. Remember this submission as
@@ -287,10 +301,9 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	*/
 	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
 	if (r)
-		goto out;
+		return r;
 
-	if (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) {
+	if (updates) {
 		dma_fence_put(id->flushed_updates);
 		id->flushed_updates = dma_fence_get(updates);
 	}
@@ -304,8 +317,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	job->vmid = id - id_mgr->ids;
 	job->pasid = vm->pasid;
 	trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
-	return r;
+	return 0;
 }
 
 /**
@@ -315,6 +327,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
  * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
@@ -336,8 +349,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
-						     fence, job);
+		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
 		mutex_unlock(&id_mgr->lock);
 		return r;
 	}
-- 
2.14.1


* [PATCH 6/8] drm/amdgpu: move reusing VMIDs into separate function
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
                     ` (3 preceding siblings ...)
  2018-01-31 15:47   ` [PATCH 5/8] drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved Christian König
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-6-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-01-31 15:47   ` [PATCH 7/8] drm/amdgpu: restructure amdgpu_vmid_grab Christian König
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Let's try this once more.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 130 ++++++++++++++++++++------------
 1 file changed, 81 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 86d012b21554..a139c4e2dc53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -321,58 +321,51 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_grab_id - allocate the next free VMID
+ * amdgpu_vm_grab_used - try to reuse a VMID
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
  * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
+ * @id: resulting VMID
  *
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ * Try to reuse a VMID for this submission.
  */
-int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct dma_fence *fence,
-		     struct amdgpu_job *job)
+static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
+				 struct amdgpu_ring *ring,
+				 struct amdgpu_sync *sync,
+				 struct dma_fence *fence,
+				 struct amdgpu_job *job,
+				 struct amdgpu_vmid **id)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct dma_fence *updates = sync->last_vm_update;
-	struct amdgpu_vmid *id, *idle;
-	int r = 0;
-
-	mutex_lock(&id_mgr->lock);
-	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
-	if (r || !idle)
-		goto error;
-
-	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
+	int r;
 
 	job->vm_needs_flush = vm->use_cpu_for_update;
+
 	/* Check if we can use a VMID already assigned to this VM */
-	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
-		struct dma_fence *flushed;
+	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
 		bool needs_flush = vm->use_cpu_for_update;
+		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
-		if (id->owner != vm->entity.fence_context)
+		if ((*id)->owner != vm->entity.fence_context)
 			continue;
 
-		if (job->vm_pd_addr != id->pd_gpu_addr)
+		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
 			continue;
 
-		if (!id->last_flush ||
-		    (id->last_flush->context != fence_context &&
-		     !dma_fence_is_signaled(id->last_flush)))
+		if (!(*id)->last_flush ||
+		    ((*id)->last_flush->context != fence_context &&
+		     !dma_fence_is_signaled((*id)->last_flush)))
 			needs_flush = true;
 
-		flushed  = id->flushed_updates;
+		flushed  = (*id)->flushed_updates;
 		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
 			needs_flush = true;
 
@@ -380,44 +373,83 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
 			continue;
 
-		/* Good we can use this VMID. Remember this submission as
+		/* Good, we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
 		if (r)
-			goto error;
+			return r;
 
 		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
-			dma_fence_put(id->flushed_updates);
-			id->flushed_updates = dma_fence_get(updates);
+			dma_fence_put((*id)->flushed_updates);
+			(*id)->flushed_updates = dma_fence_get(updates);
 		}
 
-		if (needs_flush)
-			goto needs_flush;
-		else
-			goto no_flush_needed;
-
+		job->vm_needs_flush |= needs_flush;
+		return 0;
 	}
 
-	/* Still no ID to use? Then use the idle one found earlier */
-	id = idle;
+	*id = NULL;
+	return 0;
+}
 
-	/* Remember this submission as user of the VMID */
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+/**
+ * amdgpu_vm_grab_id - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		     struct amdgpu_sync *sync, struct dma_fence *fence,
+		     struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct dma_fence *updates = sync->last_vm_update;
+	struct amdgpu_vmid *id, *idle;
+	int r = 0;
+
+	mutex_lock(&id_mgr->lock);
+	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+	if (r || !idle)
+		goto error;
+
+	if (vm->reserved_vmid[vmhub]) {
+		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
+
+	r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
 	if (r)
 		goto error;
 
-	id->pd_gpu_addr = job->vm_pd_addr;
-	dma_fence_put(id->flushed_updates);
-	id->flushed_updates = dma_fence_get(updates);
-	id->owner = vm->entity.fence_context;
+	if (!id) {
+		/* Still no ID to use? Then use the idle one found earlier */
+		id = idle;
 
-needs_flush:
-	job->vm_needs_flush = true;
-	dma_fence_put(id->last_flush);
-	id->last_flush = NULL;
+		/* Remember this submission as user of the VMID */
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+		if (r)
+			goto error;
 
-no_flush_needed:
+		id->pd_gpu_addr = job->vm_pd_addr;
+		dma_fence_put(id->flushed_updates);
+		id->flushed_updates = dma_fence_get(updates);
+		id->owner = vm->entity.fence_context;
+		job->vm_needs_flush = true;
+	}
+
+	if (job->vm_needs_flush) {
+		dma_fence_put(id->last_flush);
+		id->last_flush = NULL;
+	}
 	list_move_tail(&id->list, &id_mgr->ids_lru);
 
 	job->vmid = id - id_mgr->ids;
-- 
2.14.1


* [PATCH 7/8] drm/amdgpu: restructure amdgpu_vmid_grab
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
                     ` (4 preceding siblings ...)
  2018-01-31 15:47   ` [PATCH 6/8] drm/amdgpu: move reusing VMIDs into separate function Christian König
@ 2018-01-31 15:47   ` Christian König
  2018-01-31 15:47   ` [PATCH 8/8] drm/amdgpu: cache the fence to wait for a VMID Christian König
  2018-02-01  4:42   ` [PATCH 1/8] drm/amdgpu: make VMID assignment more fair Chunming Zhou
  7 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Now that we have the different cases for grabbing a VMID in separate
functions, restructure the top level function to only have one place
where VMIDs are assigned to jobs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 89 ++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index a139c4e2dc53..fff575fd55c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -263,33 +263,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct amdgpu_ring *ring,
 				     struct amdgpu_sync *sync,
 				     struct dma_fence *fence,
-				     struct amdgpu_job *job)
+				     struct amdgpu_job *job,
+				     struct amdgpu_vmid **id)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
-	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence *updates = sync->last_vm_update;
 	bool needs_flush = vm->use_cpu_for_update;
 	int r = 0;
 
-	if (updates && id->flushed_updates &&
-	    updates->context == id->flushed_updates->context &&
-	    !dma_fence_is_later(updates, id->flushed_updates))
+	*id = vm->reserved_vmid[vmhub];
+	if (updates && (*id)->flushed_updates &&
+	    updates->context == (*id)->flushed_updates->context &&
+	    !dma_fence_is_later(updates, (*id)->flushed_updates))
 	    updates = NULL;
 
-	if (id->owner != vm->entity.fence_context ||
-	    job->vm_pd_addr != id->pd_gpu_addr ||
-	    updates || !id->last_flush ||
-	    (id->last_flush->context != fence_context &&
-	     !dma_fence_is_signaled(id->last_flush))) {
+	if ((*id)->owner != vm->entity.fence_context ||
+	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
+	    updates || !(*id)->last_flush ||
+	    ((*id)->last_flush->context != fence_context &&
+	     !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
 
 		/* to prevent one context starved by another context */
-		id->pd_gpu_addr = 0;
-		tmp = amdgpu_sync_peek_fence(&id->active, ring);
+		(*id)->pd_gpu_addr = 0;
+		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
+			*id = NULL;
 			r = amdgpu_sync_fence(adev, sync, tmp, false);
 			return r;
 		}
@@ -299,24 +300,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	* user of the VMID.
 	*/
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
 	if (r)
 		return r;
 
 	if (updates) {
-		dma_fence_put(id->flushed_updates);
-		id->flushed_updates = dma_fence_get(updates);
+		dma_fence_put((*id)->flushed_updates);
+		(*id)->flushed_updates = dma_fence_get(updates);
 	}
-	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->entity.fence_context;
 	job->vm_needs_flush = needs_flush;
-	if (needs_flush) {
-		dma_fence_put(id->last_flush);
-		id->last_flush = NULL;
-	}
-	job->vmid = id - id_mgr->ids;
-	job->pasid = vm->pasid;
-	trace_amdgpu_vm_grab_id(vm, ring, job);
 	return 0;
 }
 
@@ -411,7 +403,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vmid *id, *idle;
 	int r = 0;
 
@@ -421,37 +412,41 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
+		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+		if (r || !id)
+			goto error;
+	} else {
+		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+		if (r)
+			goto error;
 
-	r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
-	if (r)
-		goto error;
+		if (!id) {
+			struct dma_fence *updates = sync->last_vm_update;
 
-	if (!id) {
-		/* Still no ID to use? Then use the idle one found earlier */
-		id = idle;
+			/* Still no ID to use? Then use the idle one found earlier */
+			id = idle;
 
-		/* Remember this submission as user of the VMID */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-		if (r)
-			goto error;
+			/* Remember this submission as user of the VMID */
+			r = amdgpu_sync_fence(ring->adev, &id->active,
+					      fence, false);
+			if (r)
+				goto error;
 
-		id->pd_gpu_addr = job->vm_pd_addr;
-		dma_fence_put(id->flushed_updates);
-		id->flushed_updates = dma_fence_get(updates);
-		id->owner = vm->entity.fence_context;
-		job->vm_needs_flush = true;
+			dma_fence_put(id->flushed_updates);
+			id->flushed_updates = dma_fence_get(updates);
+			job->vm_needs_flush = true;
+		}
+
+		list_move_tail(&id->list, &id_mgr->ids_lru);
 	}
 
+	id->pd_gpu_addr = job->vm_pd_addr;
+	id->owner = vm->entity.fence_context;
+
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
 		id->last_flush = NULL;
 	}
-	list_move_tail(&id->list, &id_mgr->ids_lru);
-
 	job->vmid = id - id_mgr->ids;
 	job->pasid = vm->pasid;
 	trace_amdgpu_vm_grab_id(vm, ring, job);
-- 
2.14.1
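Condensed, the top-level function after this patch reads roughly as follows
(error handling and a few details abbreviated); the tail after the if/else is
now the single place where a VMID is handed to the job:

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* nothing reusable: take the idle one found earlier,
			 * remember this submission as its user and force a
			 * flush */
			id = idle;
			/* ... amdgpu_sync_fence(), flushed_updates, ... */
			job->vm_needs_flush = true;
		}
		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	/* single assignment point for all of the paths above */
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->entity.fence_context;
	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);
	/* ... unlock and return ... */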


* [PATCH 8/8] drm/amdgpu: cache the fence to wait for a VMID
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
                     ` (5 preceding siblings ...)
  2018-01-31 15:47   ` [PATCH 7/8] drm/amdgpu: restructure amdgpu_vmid_grab Christian König
@ 2018-01-31 15:47   ` Christian König
       [not found]     ` <20180131154756.3374-8-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-02-01  4:42   ` [PATCH 1/8] drm/amdgpu: make VMID assignment more fair Chunming Zhou
  7 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-01-31 15:47 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Beneficial when a lot of processes are waiting for VMIDs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c  | 7 +++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index fff575fd55c0..b4526f98f0c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -205,6 +205,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	unsigned i;
 	int r;
 
+	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
+		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
 		return -ENOMEM;
@@ -239,9 +242,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 		}
 
 		r = amdgpu_sync_fence(adev, sync, &array->base, false);
-		dma_fence_put(&array->base);
+		dma_fence_put(ring->vmid_wait);
+		ring->vmid_wait = &array->base;
 		return r;
-
 	}
 	kfree(fences);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..e223b0f6417b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 
 	amdgpu_debugfs_ring_fini(ring);
 
+	dma_fence_put(ring->vmid_wait);
+	ring->vmid_wait = NULL;
+
 	ring->adev->rings[ring->idx] = NULL;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 867f53332305..075976855651 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -196,6 +196,7 @@ struct amdgpu_ring {
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
 	unsigned		vm_inv_eng;
+	struct dma_fence	*vmid_wait;
 	bool			has_compute_vm_bug;
 
 	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];
-- 
2.14.1
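The mechanism is small: instead of dropping the dma_fence_array built while
waiting for a VMID, amdgpu_vmid_grab_idle() now keeps a reference to it on the
ring, so later submissions that also have to wait depend on the same fence
instead of each building a new array. Sketch of the relevant lines, slightly
simplified:

	/* Fast path: a wait for a VMID on this ring is already in flight,
	 * just depend on the same fence instead of rebuilding the array.
	 */
	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);

	/* ... idle VMID search; if nothing is idle, build the fence array ... */

	r = amdgpu_sync_fence(adev, sync, &array->base, false);
	/* cache the array on the ring instead of dropping the reference */
	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = &array->base;
	return r;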


* Re: [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
       [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
                     ` (6 preceding siblings ...)
  2018-01-31 15:47   ` [PATCH 8/8] drm/amdgpu: cache the fence to wait for a VMID Christian König
@ 2018-02-01  4:42   ` Chunming Zhou
       [not found]     ` <2e5a2349-5036-dff7-ab06-286bcf9de9b3-5C7GfCeVMHo@public.gmane.org>
  7 siblings, 1 reply; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  4:42 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

NAK, the reserved vmid is independent of the id_mgr; it is removed from the
id_mgr when a process allocates a reserved vmid.


Regards,

David Zhou


On 2018-01-31 23:47, Christian König wrote:
> To guarantee fairness between processes, grab the reserved VMID only when
> there is an idle one.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++-----
>   1 file changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index c13cf7e79b2e..7a3d0de7425d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   	int r = 0;
>   
>   	mutex_lock(&id_mgr->lock);
> -	if (vm->reserved_vmid[vmhub]) {
> -		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
> -		mutex_unlock(&id_mgr->lock);
> -		return r;
> -	}
>   	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
>   	if (!fences) {
>   		mutex_unlock(&id_mgr->lock);
> @@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   	}
>   	kfree(fences);
>   
> +	if (vm->reserved_vmid[vmhub]) {
> +		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
> +						     fence, job);
> +		mutex_unlock(&id_mgr->lock);
> +		return r;
> +	}
> +
>   	job->vm_needs_flush = vm->use_cpu_for_update;
>   	/* Check if we can use a VMID already assigned to this VM */
>   	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {


* Re: [PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function
       [not found]     ` <20180131154756.3374-2-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  5:40       ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  5:40 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


On 2018-01-31 23:47, Christian König wrote:
> No functional change, but makes it easier to maintain the code.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 116 +++++++++++++++++++-------------
>   1 file changed, 69 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 7a3d0de7425d..fbe958f7cb5b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -182,6 +182,72 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
>   		atomic_read(&adev->gpu_reset_counter);
>   }
>   
> +/**
> + * amdgpu_vm_grab_idle - grab idle VMID
> + *
> + * @vm: vm to allocate id for
> + * @ring: ring we want to submit job to
> + * @sync: sync object where we add dependencies
> + * @idle: resulting idle VMID
> + *
> + * Try to find an idle VMID, if none is idle add a fence to wait to the sync
> + * object. Returns -ENOMEM when we are out of memory.
> + */
> +static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
> +				 struct amdgpu_ring *ring,
> +				 struct amdgpu_sync *sync,
> +				 struct amdgpu_vmid **idle)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	unsigned vmhub = ring->funcs->vmhub;
> +	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +	struct dma_fence **fences;
> +	unsigned i;
> +	int r;
> +
> +	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> +	if (!fences)
> +		return -ENOMEM;
> +
> +	/* Check if we have an idle VMID */
> +	i = 0;
> +	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
> +		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
> +		if (!fences[i])
> +			break;
> +		++i;
> +	}
> +
> +	/* If we can't find a idle VMID to use, wait till one becomes available */
> +	if (&(*idle)->list == &id_mgr->ids_lru) {
> +		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
> +		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
> +		struct dma_fence_array *array;
> +		unsigned j;
> +
> +		*idle = NULL;
> +		for (j = 0; j < i; ++j)
> +			dma_fence_get(fences[j]);
> +
> +		array = dma_fence_array_create(i, fences, fence_context,
> +					       seqno, true);
> +		if (!array) {
> +			for (j = 0; j < i; ++j)
> +				dma_fence_put(fences[j]);
> +			kfree(fences);
> +			return -ENOMEM;
> +		}
> +
> +		r = amdgpu_sync_fence(adev, sync, &array->base, false);
> +		dma_fence_put(&array->base);
> +		return r;
> +
> +	}
> +	kfree(fences);
> +
> +	return 0;
> +}
> +
>   /* idr_mgr->lock must be held */
>   static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   					    struct amdgpu_ring *ring,
> @@ -263,56 +329,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   	uint64_t fence_context = adev->fence_context + ring->idx;
>   	struct dma_fence *updates = sync->last_vm_update;
>   	struct amdgpu_vmid *id, *idle;
> -	struct dma_fence **fences;
> -	unsigned i;
>   	int r = 0;
>   
>   	mutex_lock(&id_mgr->lock);
> -	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> -	if (!fences) {
> -		mutex_unlock(&id_mgr->lock);
> -		return -ENOMEM;
> -	}
> -	/* Check if we have an idle VMID */
> -	i = 0;
> -	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
> -		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
> -		if (!fences[i])
> -			break;
> -		++i;
> -	}
> -
> -	/* If we can't find a idle VMID to use, wait till one becomes available */
> -	if (&idle->list == &id_mgr->ids_lru) {
> -		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
> -		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
> -		struct dma_fence_array *array;
> -		unsigned j;
> -
> -		for (j = 0; j < i; ++j)
> -			dma_fence_get(fences[j]);
> -
> -		array = dma_fence_array_create(i, fences, fence_context,
> -					   seqno, true);
> -		if (!array) {
> -			for (j = 0; j < i; ++j)
> -				dma_fence_put(fences[j]);
> -			kfree(fences);
> -			r = -ENOMEM;
> -			goto error;
> -		}
> -
> -
> -		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
> -		dma_fence_put(&array->base);
> -		if (r)
> -			goto error;
> -
> -		mutex_unlock(&id_mgr->lock);
> -		return 0;
> -
> -	}
> -	kfree(fences);
> +	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
> +	if (r || !idle)
> +		goto error;
>   
>   	if (vm->reserved_vmid[vmhub]) {
>   		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,


* Re: [PATCH 3/8] drm/amdgpu: make VMID owner non-atomic
       [not found]     ` <20180131154756.3374-3-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  5:44       ` Chunming Zhou
       [not found]         ` <bcaaf808-e368-b386-946c-e6cbdaad690b-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  5:44 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2018-01-31 23:47, Christian König wrote:
> The variable is protected by the VMID mutex anyway.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 10 +++++-----
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  2 +-
>   2 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index fbe958f7cb5b..8374fe870e8c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -267,7 +267,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   
>   	flushed  = id->flushed_updates;
>   	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
> -	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
> +	    (id->owner != vm->entity.fence_context) ||
>   	    (job->vm_pd_addr != id->pd_gpu_addr) ||
>   	    (updates && (!flushed || updates->context != flushed->context ||
>   			dma_fence_is_later(updates, flushed))) ||
> @@ -296,7 +296,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   		id->flushed_updates = dma_fence_get(updates);
>   	}
>   	id->pd_gpu_addr = job->vm_pd_addr;
> -	atomic64_set(&id->owner, vm->entity.fence_context);
> +	id->owner = vm->entity.fence_context;
>   	job->vm_needs_flush = needs_flush;
>   	if (needs_flush) {
>   		dma_fence_put(id->last_flush);
> @@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   		if (amdgpu_vmid_had_gpu_reset(adev, id))
>   			continue;
>   
> -		if (atomic64_read(&id->owner) != vm->entity.fence_context)
> +		if (id->owner != vm->entity.fence_context)
>   			continue;
>   
>   		if (job->vm_pd_addr != id->pd_gpu_addr)
> @@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   	id->pd_gpu_addr = job->vm_pd_addr;
>   	dma_fence_put(id->flushed_updates);
>   	id->flushed_updates = dma_fence_get(updates);
> -	atomic64_set(&id->owner, vm->entity.fence_context);
> +	id->owner = vm->entity.fence_context;
>   
>   needs_flush:
>   	job->vm_needs_flush = true;
> @@ -482,7 +482,7 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
>   	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
>   	struct amdgpu_vmid *id = &id_mgr->ids[vmid];
>   
> -	atomic64_set(&id->owner, 0);
> +	id->owner = 0;
This is not protected by the id_mgr mutex; you can make another patch to add
the mutex for this function (a minimal sketch follows after the quoted patch).

Regards,
David Zhou
>   	id->gds_base = 0;
>   	id->gds_size = 0;
>   	id->gws_base = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> index 38f37c16fc5e..20d4eca6cd6a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
> @@ -43,7 +43,7 @@ struct amdgpu_vmid {
>   	struct list_head	list;
>   	struct amdgpu_sync	active;
>   	struct dma_fence	*last_flush;
> -	atomic64_t		owner;
> +	uint64_t		owner;
>   
>   	uint64_t		pd_gpu_addr;
>   	/* last flushed PD/PT update */
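A minimal sketch of the follow-up patch suggested above, assuming id_mgr->lock
is the lock meant and that the third parameter of amdgpu_vmid_reset() is the
VMID index (hypothetical, not part of this series):

	void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
			       unsigned vmid)
	{
		struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
		struct amdgpu_vmid *id = &id_mgr->ids[vmid];

		/* take the same mutex that protects id->owner in the grab
		 * paths, so resetting the owner cannot race with a grab */
		mutex_lock(&id_mgr->lock);
		id->owner = 0;
		id->gds_base = 0;
		id->gds_size = 0;
		id->gws_base = 0;
		/* ... remaining per-ID fields cleared as before ... */
		mutex_unlock(&id_mgr->lock);
	}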


* Re: [PATCH 4/8] drm/amdgpu: stop checking GPU reset counter during VMID grab
       [not found]     ` <20180131154756.3374-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  5:48       ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  5:48 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


On 2018-01-31 23:47, Christian König wrote:
> We do this later on when we flush the VMID anyway.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 +-----
>   1 file changed, 1 insertion(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 8374fe870e8c..2cb4e1750d10 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -266,8 +266,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   	bool needs_flush = vm->use_cpu_for_update;
>   
>   	flushed  = id->flushed_updates;
> -	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
> -	    (id->owner != vm->entity.fence_context) ||
> +	if ((id->owner != vm->entity.fence_context) ||
>   	    (job->vm_pd_addr != id->pd_gpu_addr) ||
>   	    (updates && (!flushed || updates->context != flushed->context ||
>   			dma_fence_is_later(updates, flushed))) ||
> @@ -350,9 +349,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   		bool needs_flush = vm->use_cpu_for_update;
>   
>   		/* Check all the prerequisites to using this VMID */
> -		if (amdgpu_vmid_had_gpu_reset(adev, id))
> -			continue;
> -
>   		if (id->owner != vm->entity.fence_context)
>   			continue;
>   


* Re: [PATCH 5/8] drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved
       [not found]     ` <20180131154756.3374-5-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  6:16       ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  6:16 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


On 2018-01-31 23:47, Christian König wrote:
> Drop the "_locked" from the name, cleanup and simplify the logic a bit.
> Add missing comments.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 ++++++++++++++++++++-------------
>   1 file changed, 35 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 2cb4e1750d10..86d012b21554 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -248,12 +248,22 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
>   	return 0;
>   }
>   
> -/* idr_mgr->lock must be held */
> -static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
> -					    struct amdgpu_ring *ring,
> -					    struct amdgpu_sync *sync,
> -					    struct dma_fence *fence,
> -					    struct amdgpu_job *job)
> +/**
> + * amdgpu_vm_grab_reserved - try to assign reserved VMID
> + *
> + * @vm: vm to allocate id for
> + * @ring: ring we want to submit job to
> + * @sync: sync object where we add dependencies
> + * @fence: fence protecting ID from reuse
> + * @job: job who wants to use the VMID
> + *
> + * Try to assign a reserved VMID.
> + */
> +static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
> +				     struct amdgpu_ring *ring,
> +				     struct amdgpu_sync *sync,
> +				     struct dma_fence *fence,
> +				     struct amdgpu_job *job)
>   {
>   	struct amdgpu_device *adev = ring->adev;
>   	unsigned vmhub = ring->funcs->vmhub;
> @@ -261,18 +271,21 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
>   	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
>   	struct dma_fence *updates = sync->last_vm_update;
> -	int r = 0;
> -	struct dma_fence *flushed, *tmp;
>   	bool needs_flush = vm->use_cpu_for_update;
> +	int r = 0;
> +
> +	if (updates && id->flushed_updates &&
> +	    updates->context == id->flushed_updates->context &&
> +	    !dma_fence_is_later(updates, id->flushed_updates))
> +	    updates = NULL;
> +
> +	if (id->owner != vm->entity.fence_context ||
> +	    job->vm_pd_addr != id->pd_gpu_addr ||
> +	    updates || !id->last_flush ||
> +	    (id->last_flush->context != fence_context &&
> +	     !dma_fence_is_signaled(id->last_flush))) {
> +		struct dma_fence *tmp;
>   
> -	flushed  = id->flushed_updates;
> -	if ((id->owner != vm->entity.fence_context) ||
> -	    (job->vm_pd_addr != id->pd_gpu_addr) ||
> -	    (updates && (!flushed || updates->context != flushed->context ||
> -			dma_fence_is_later(updates, flushed))) ||
> -	    (!id->last_flush || (id->last_flush->context != fence_context &&
> -				 !dma_fence_is_signaled(id->last_flush)))) {
> -		needs_flush = true;
>   		/* to prevent one context starved by another context */
>   		id->pd_gpu_addr = 0;
>   		tmp = amdgpu_sync_peek_fence(&id->active, ring);
> @@ -280,6 +293,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   			r = amdgpu_sync_fence(adev, sync, tmp, false);
>   			return r;
>   		}
> +		needs_flush = true;
>   	}
>   
>   	/* Good we can use this VMID. Remember this submission as
> @@ -287,10 +301,9 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   	*/
>   	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
>   	if (r)
> -		goto out;
> +		return r;
>   
> -	if (updates && (!flushed || updates->context != flushed->context ||
> -			dma_fence_is_later(updates, flushed))) {
> +	if (updates) {
>   		dma_fence_put(id->flushed_updates);
>   		id->flushed_updates = dma_fence_get(updates);
>   	}
> @@ -304,8 +317,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   	job->vmid = id - id_mgr->ids;
>   	job->pasid = vm->pasid;
>   	trace_amdgpu_vm_grab_id(vm, ring, job);
> -out:
> -	return r;
> +	return 0;
>   }
>   
>   /**
> @@ -315,6 +327,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>    * @ring: ring we want to submit job to
>    * @sync: sync object where we add dependencies
>    * @fence: fence protecting ID from reuse
> + * @job: job who wants to use the VMID
>    *
>    * Allocate an id for the vm, adding fences to the sync obj as necessary.
>    */
> @@ -336,8 +349,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   		goto error;
>   
>   	if (vm->reserved_vmid[vmhub]) {
> -		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
> -						     fence, job);
> +		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
>   		mutex_unlock(&id_mgr->lock);
>   		return r;
>   	}


* Re: [PATCH 6/8] drm/amdgpu: move reusing VMIDs into separate function
       [not found]     ` <20180131154756.3374-6-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  6:23       ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  6:23 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>


On 2018-01-31 23:47, Christian König wrote:
> Let's try this once more.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 130 ++++++++++++++++++++------------
>   1 file changed, 81 insertions(+), 49 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 86d012b21554..a139c4e2dc53 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -321,58 +321,51 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
>   }
>   
>   /**
> - * amdgpu_vm_grab_id - allocate the next free VMID
> + * amdgpu_vm_grab_used - try to reuse a VMID
>    *
>    * @vm: vm to allocate id for
>    * @ring: ring we want to submit job to
>    * @sync: sync object where we add dependencies
>    * @fence: fence protecting ID from reuse
>    * @job: job who wants to use the VMID
> + * @id: resulting VMID
>    *
> - * Allocate an id for the vm, adding fences to the sync obj as necessary.
> + * Try to reuse a VMID for this submission.
>    */
> -int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> -		     struct amdgpu_sync *sync, struct dma_fence *fence,
> -		     struct amdgpu_job *job)
> +static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
> +				 struct amdgpu_ring *ring,
> +				 struct amdgpu_sync *sync,
> +				 struct dma_fence *fence,
> +				 struct amdgpu_job *job,
> +				 struct amdgpu_vmid **id)
>   {
>   	struct amdgpu_device *adev = ring->adev;
>   	unsigned vmhub = ring->funcs->vmhub;
>   	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
>   	uint64_t fence_context = adev->fence_context + ring->idx;
>   	struct dma_fence *updates = sync->last_vm_update;
> -	struct amdgpu_vmid *id, *idle;
> -	int r = 0;
> -
> -	mutex_lock(&id_mgr->lock);
> -	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
> -	if (r || !idle)
> -		goto error;
> -
> -	if (vm->reserved_vmid[vmhub]) {
> -		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
> -		mutex_unlock(&id_mgr->lock);
> -		return r;
> -	}
> +	int r;
>   
>   	job->vm_needs_flush = vm->use_cpu_for_update;
> +
>   	/* Check if we can use a VMID already assigned to this VM */
> -	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
> -		struct dma_fence *flushed;
> +	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
>   		bool needs_flush = vm->use_cpu_for_update;
> +		struct dma_fence *flushed;
>   
>   		/* Check all the prerequisites to using this VMID */
> -		if (id->owner != vm->entity.fence_context)
> +		if ((*id)->owner != vm->entity.fence_context)
>   			continue;
>   
> -		if (job->vm_pd_addr != id->pd_gpu_addr)
> +		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
>   			continue;
>   
> -		if (!id->last_flush ||
> -		    (id->last_flush->context != fence_context &&
> -		     !dma_fence_is_signaled(id->last_flush)))
> +		if (!(*id)->last_flush ||
> +		    ((*id)->last_flush->context != fence_context &&
> +		     !dma_fence_is_signaled((*id)->last_flush)))
>   			needs_flush = true;
>   
> -		flushed  = id->flushed_updates;
> +		flushed  = (*id)->flushed_updates;
>   		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
>   			needs_flush = true;
>   
> @@ -380,44 +373,83 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
>   			continue;
>   
> -		/* Good we can use this VMID. Remember this submission as
> +		/* Good, we can use this VMID. Remember this submission as
>   		 * user of the VMID.
>   		 */
> -		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
> +		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
>   		if (r)
> -			goto error;
> +			return r;
>   
>   		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
> -			dma_fence_put(id->flushed_updates);
> -			id->flushed_updates = dma_fence_get(updates);
> +			dma_fence_put((*id)->flushed_updates);
> +			(*id)->flushed_updates = dma_fence_get(updates);
>   		}
>   
> -		if (needs_flush)
> -			goto needs_flush;
> -		else
> -			goto no_flush_needed;
> -
> +		job->vm_needs_flush |= needs_flush;
> +		return 0;
>   	}
>   
> -	/* Still no ID to use? Then use the idle one found earlier */
> -	id = idle;
> +	*id = NULL;
> +	return 0;
> +}
>   
> -	/* Remember this submission as user of the VMID */
> -	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
> +/**
> + * amdgpu_vm_grab_id - allocate the next free VMID
> + *
> + * @vm: vm to allocate id for
> + * @ring: ring we want to submit job to
> + * @sync: sync object where we add dependencies
> + * @fence: fence protecting ID from reuse
> + * @job: job who wants to use the VMID
> + *
> + * Allocate an id for the vm, adding fences to the sync obj as necessary.
> + */
> +int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> +		     struct amdgpu_sync *sync, struct dma_fence *fence,
> +		     struct amdgpu_job *job)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	unsigned vmhub = ring->funcs->vmhub;
> +	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +	struct dma_fence *updates = sync->last_vm_update;
> +	struct amdgpu_vmid *id, *idle;
> +	int r = 0;
> +
> +	mutex_lock(&id_mgr->lock);
> +	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
> +	if (r || !idle)
> +		goto error;
> +
> +	if (vm->reserved_vmid[vmhub]) {
> +		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
> +		mutex_unlock(&id_mgr->lock);
> +		return r;
> +	}
> +
> +	r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
>   	if (r)
>   		goto error;
>   
> -	id->pd_gpu_addr = job->vm_pd_addr;
> -	dma_fence_put(id->flushed_updates);
> -	id->flushed_updates = dma_fence_get(updates);
> -	id->owner = vm->entity.fence_context;
> +	if (!id) {
> +		/* Still no ID to use? Then use the idle one found earlier */
> +		id = idle;
>   
> -needs_flush:
> -	job->vm_needs_flush = true;
> -	dma_fence_put(id->last_flush);
> -	id->last_flush = NULL;
> +		/* Remember this submission as user of the VMID */
> +		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
> +		if (r)
> +			goto error;
>   
> -no_flush_needed:
> +		id->pd_gpu_addr = job->vm_pd_addr;
> +		dma_fence_put(id->flushed_updates);
> +		id->flushed_updates = dma_fence_get(updates);
> +		id->owner = vm->entity.fence_context;
> +		job->vm_needs_flush = true;
> +	}
> +
> +	if (job->vm_needs_flush) {
> +		dma_fence_put(id->last_flush);
> +		id->last_flush = NULL;
> +	}
>   	list_move_tail(&id->list, &id_mgr->ids_lru);
>   
>   	job->vmid = id - id_mgr->ids;

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 8/8] drm/amdgpu: cache the fence to wait for a VMID
       [not found]     ` <20180131154756.3374-8-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  6:32       ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  6:32 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2018-01-31 23:47, Christian König wrote:
> Beneficial when a lot of processes are waiting for VMIDs.
Yeah, we can avoid re-creating the fence array when the VMIDs are used up.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>

>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c  | 7 +++++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
>   3 files changed, 9 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index fff575fd55c0..b4526f98f0c0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -205,6 +205,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
>   	unsigned i;
>   	int r;
>   
> +	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
> +		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
> +
>   	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
>   	if (!fences)
>   		return -ENOMEM;
> @@ -239,9 +242,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
>   		}
>   
>   		r = amdgpu_sync_fence(adev, sync, &array->base, false);
> -		dma_fence_put(&array->base);
> +		dma_fence_put(ring->vmid_wait);
> +		ring->vmid_wait = &array->base;
>   		return r;
> -
>   	}
>   	kfree(fences);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> index 13044e66dcaf..e223b0f6417b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
> @@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
>   
>   	amdgpu_debugfs_ring_fini(ring);
>   
> +	dma_fence_put(ring->vmid_wait);
> +	ring->vmid_wait = NULL;
> +
>   	ring->adev->rings[ring->idx] = NULL;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> index 867f53332305..075976855651 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> @@ -196,6 +196,7 @@ struct amdgpu_ring {
>   	u64			cond_exe_gpu_addr;
>   	volatile u32		*cond_exe_cpu_addr;
>   	unsigned		vm_inv_eng;
> +	struct dma_fence	*vmid_wait;
>   	bool			has_compute_vm_bug;
>   
>   	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
       [not found]     ` <2e5a2349-5036-dff7-ab06-286bcf9de9b3-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  8:36       ` Christian König
       [not found]         ` <d3c6258b-2015-6d11-65cf-06e6449d76c7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-02-01  8:36 UTC (permalink / raw)
  To: Chunming Zhou, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> reserved vmid is independent of id_mgr
And that is exactly the reason why I want to have that here.

If we don't fix this, reserving a VMID would give the process an unfair 
advantage while scheduling jobs.
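
The resulting order in amdgpu_vmid_grab() is roughly the following
(simplified sketch only, the real hunks are in the diff quoted below):

	mutex_lock(&id_mgr->lock);

	/* 1. Wait for an idle VMID first.  When all IDs are busy every
	 *    process blocks here, whether it reserved a VMID or not -
	 *    that is what makes the assignment fair. */

	/* 2. Only then take the reserved-VMID shortcut. */
	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
						     fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}

	/* 3. Otherwise try to reuse a VMID already assigned to this VM,
	 *    falling back to the idle one found in step 1. */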

Regards,
Christian.

On 2018-02-01 05:42, Chunming Zhou wrote:
> NAK, the reserved vmid is independent of the id_mgr; it is removed from 
> the id_mgr when a process allocates a reserved vmid.
>
>
> Regards,
>
> David Zhou
>
>
>> On 2018-01-31 23:47, Christian König wrote:
>> To guarantee fairness between processes grab reserved VMID only when
>> there is an idle one.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++-----
>>   1 file changed, 7 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> index c13cf7e79b2e..7a3d0de7425d 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> @@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>> struct amdgpu_ring *ring,
>>       int r = 0;
>>         mutex_lock(&id_mgr->lock);
>> -    if (vm->reserved_vmid[vmhub]) {
>> -        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, 
>> job);
>> -        mutex_unlock(&id_mgr->lock);
>> -        return r;
>> -    }
>>       fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, 
>> GFP_KERNEL);
>>       if (!fences) {
>>           mutex_unlock(&id_mgr->lock);
>> @@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>> struct amdgpu_ring *ring,
>>       }
>>       kfree(fences);
>>   +    if (vm->reserved_vmid[vmhub]) {
>> +        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
>> +                             fence, job);
>> +        mutex_unlock(&id_mgr->lock);
>> +        return r;
>> +    }
>> +
>>       job->vm_needs_flush = vm->use_cpu_for_update;
>>       /* Check if we can use a VMID already assigned to this VM */
>>       list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 3/8] drm/amdgpu: make VMID owner none atomic
       [not found]         ` <bcaaf808-e368-b386-946c-e6cbdaad690b-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  8:40           ` Christian König
  0 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2018-02-01  8:40 UTC (permalink / raw)
  To: Chunming Zhou, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-02-01 06:44, Chunming Zhou wrote:
>
>
>> On 2018-01-31 23:47, Christian König wrote:
>> The variable is protected by the VMID mutex anyway.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 10 +++++-----
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  2 +-
>>   2 files changed, 6 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> index fbe958f7cb5b..8374fe870e8c 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>> @@ -267,7 +267,7 @@ static int 
>> amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>         flushed  = id->flushed_updates;
>>       if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
>> -        (atomic64_read(&id->owner) != vm->entity.fence_context) ||
>> +        (id->owner != vm->entity.fence_context) ||
>>           (job->vm_pd_addr != id->pd_gpu_addr) ||
>>           (updates && (!flushed || updates->context != 
>> flushed->context ||
>>               dma_fence_is_later(updates, flushed))) ||
>> @@ -296,7 +296,7 @@ static int 
>> amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>           id->flushed_updates = dma_fence_get(updates);
>>       }
>>       id->pd_gpu_addr = job->vm_pd_addr;
>> -    atomic64_set(&id->owner, vm->entity.fence_context);
>> +    id->owner = vm->entity.fence_context;
>>       job->vm_needs_flush = needs_flush;
>>       if (needs_flush) {
>>           dma_fence_put(id->last_flush);
>> @@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct 
>> amdgpu_ring *ring,
>>           if (amdgpu_vmid_had_gpu_reset(adev, id))
>>               continue;
>>   -        if (atomic64_read(&id->owner) != vm->entity.fence_context)
>> +        if (id->owner != vm->entity.fence_context)
>>               continue;
>>             if (job->vm_pd_addr != id->pd_gpu_addr)
>> @@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct 
>> amdgpu_ring *ring,
>>       id->pd_gpu_addr = job->vm_pd_addr;
>>       dma_fence_put(id->flushed_updates);
>>       id->flushed_updates = dma_fence_get(updates);
>> -    atomic64_set(&id->owner, vm->entity.fence_context);
>> +    id->owner = vm->entity.fence_context;
>>     needs_flush:
>>       job->vm_needs_flush = true;
>> @@ -482,7 +482,7 @@ void amdgpu_vmid_reset(struct amdgpu_device 
>> *adev, unsigned vmhub,
>>       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
>>       struct amdgpu_vmid *id = &id_mgr->ids[vmid];
>>   -    atomic64_set(&id->owner, 0);
>> +    id->owner = 0;
> This is not protected by the id_mgr mutex; you could make another patch 
> to add the mutex for this function.

That function is only called during suspend/resume and GPU reset, but 
adding the extra mutex there probably won't hurt and is indeed cleaner.
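
A minimal sketch of what that follow-up could look like - just a
suggestion, not the committed change - is to take the same per-hub lock
around the whole reset:

	void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
			       unsigned vmid)
	{
		struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
		struct amdgpu_vmid *id = &id_mgr->ids[vmid];

		mutex_lock(&id_mgr->lock);
		id->owner = 0;
		id->gds_base = 0;
		id->gds_size = 0;
		id->gws_base = 0;
		/* ... the remaining per-ID state is cleared here exactly as
		 * the function does today ... */
		mutex_unlock(&id_mgr->lock);
	}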

Christian.

>
> Regards,
> David Zhou
>>       id->gds_base = 0;
>>       id->gds_size = 0;
>>       id->gws_base = 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> index 38f37c16fc5e..20d4eca6cd6a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
>> @@ -43,7 +43,7 @@ struct amdgpu_vmid {
>>       struct list_head    list;
>>       struct amdgpu_sync    active;
>>       struct dma_fence    *last_flush;
>> -    atomic64_t        owner;
>> +    uint64_t        owner;
>>         uint64_t        pd_gpu_addr;
>>       /* last flushed PD/PT update */
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
       [not found]         ` <d3c6258b-2015-6d11-65cf-06e6449d76c7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-02-01  8:55           ` Chunming Zhou
       [not found]             ` <a8418ef0-6bec-af57-0555-cf48b38fa09a-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  8:55 UTC (permalink / raw)
  To: christian.koenig-5C7GfCeVMHo, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2018-02-01 16:36, Christian König wrote:
>> reserved vmid is independent of id_mgr
> And that is exactly the reason why I want to have that here.
>
> If we don't fix this, reserving a VMID would give the process an unfair 
> advantage while scheduling jobs.
OK, I see what you mean; that is the same reason why you always require 
an idle VMID even when re-using one. Adding this explanation to the 
commit message would be better.
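
Something along these lines, for example (only a suggestion for the
wording, based on your explanation above):

    Grab the reserved VMID only after an idle one has been found;
    otherwise reserving a VMID would let a process skip the wait for a
    free ID and give it an unfair scheduling advantage over other
    processes.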

Acked-by: Chunming Zhou <david1.zhou@amd.com> for this patch and patch 
#7 then.

Regards,
David Zhou
>
> Regards,
> Christian.
>
> On 2018-02-01 05:42, Chunming Zhou wrote:
>> NAK, the reserved vmid is independent of the id_mgr; it is removed from 
>> the id_mgr when a process allocates a reserved vmid.
>>
>>
>> Regards,
>>
>> David Zhou
>>
>>
>> On 2018-01-31 23:47, Christian König wrote:
>>> To guarantee fairness between processes grab reserved VMID only when
>>> there is an idle one.
>>>
>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++-----
>>>   1 file changed, 7 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> index c13cf7e79b2e..7a3d0de7425d 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> @@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>>> struct amdgpu_ring *ring,
>>>       int r = 0;
>>>         mutex_lock(&id_mgr->lock);
>>> -    if (vm->reserved_vmid[vmhub]) {
>>> -        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, 
>>> job);
>>> -        mutex_unlock(&id_mgr->lock);
>>> -        return r;
>>> -    }
>>>       fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, 
>>> GFP_KERNEL);
>>>       if (!fences) {
>>>           mutex_unlock(&id_mgr->lock);
>>> @@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>>> struct amdgpu_ring *ring,
>>>       }
>>>       kfree(fences);
>>>   +    if (vm->reserved_vmid[vmhub]) {
>>> +        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
>>> +                             fence, job);
>>> +        mutex_unlock(&id_mgr->lock);
>>> +        return r;
>>> +    }
>>> +
>>>       job->vm_needs_flush = vm->use_cpu_for_update;
>>>       /* Check if we can use a VMID already assigned to this VM */
>>>       list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
>>
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
       [not found]             ` <a8418ef0-6bec-af57-0555-cf48b38fa09a-5C7GfCeVMHo@public.gmane.org>
@ 2018-02-01  9:05               ` Christian König
       [not found]                 ` <d4989e55-7331-9e1b-327b-8cf6789fefc3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 20+ messages in thread
From: Christian König @ 2018-02-01  9:05 UTC (permalink / raw)
  To: Chunming Zhou, christian.koenig-5C7GfCeVMHo,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-02-01 09:55, Chunming Zhou wrote:
>
>
> On 2018-02-01 16:36, Christian König wrote:
>>> reserved vmid is independent of id_mgr
>> And that is exactly the reason why I want to have that here.
>>
>> If we don't fix this, reserving a VMID would give the process an 
>> unfair advantage while scheduling jobs.
> OK, I see what you mean; that is the same reason why you always require 
> an idle VMID even when re-using one.
Yes, exactly.

> Adding this explanation to the commit message would be better.
Good point.

>
> Acked-by: Chunming Zhou <david1.zhou@amd.com> for this patch and patch 
> #7 then.

With the addition of the mutex, can I get your rb or ab for patch #3 as 
well? Because then it looks ready for commit.

Thanks,
Christian.

>
> Regards,
> David Zhou
>>
>> Regards,
>> Christian.
>>
>> On 2018-02-01 05:42, Chunming Zhou wrote:
>>> NAK, the reserved vmid is independent of the id_mgr; it is removed 
>>> from the id_mgr when a process allocates a reserved vmid.
>>>
>>>
>>> Regards,
>>>
>>> David Zhou
>>>
>>>
>>> On 2018-01-31 23:47, Christian König wrote:
>>>> To guarantee fairness between processes grab reserved VMID only when
>>>> there is an idle one.
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++-----
>>>>   1 file changed, 7 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>>> index c13cf7e79b2e..7a3d0de7425d 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>>> @@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>>>> struct amdgpu_ring *ring,
>>>>       int r = 0;
>>>>         mutex_lock(&id_mgr->lock);
>>>> -    if (vm->reserved_vmid[vmhub]) {
>>>> -        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, 
>>>> fence, job);
>>>> -        mutex_unlock(&id_mgr->lock);
>>>> -        return r;
>>>> -    }
>>>>       fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, 
>>>> GFP_KERNEL);
>>>>       if (!fences) {
>>>>           mutex_unlock(&id_mgr->lock);
>>>> @@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, 
>>>> struct amdgpu_ring *ring,
>>>>       }
>>>>       kfree(fences);
>>>>   +    if (vm->reserved_vmid[vmhub]) {
>>>> +        r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
>>>> +                             fence, job);
>>>> +        mutex_unlock(&id_mgr->lock);
>>>> +        return r;
>>>> +    }
>>>> +
>>>>       job->vm_needs_flush = vm->use_cpu_for_update;
>>>>       /* Check if we can use a VMID already assigned to this VM */
>>>>       list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
>>>
>>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/8] drm/amdgpu: make VMID assignment more fair
       [not found]                 ` <d4989e55-7331-9e1b-327b-8cf6789fefc3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-02-01  9:12                   ` Chunming Zhou
  0 siblings, 0 replies; 20+ messages in thread
From: Chunming Zhou @ 2018-02-01  9:12 UTC (permalink / raw)
  To: christian.koenig-5C7GfCeVMHo, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2018-02-01 17:05, Christian König wrote:
> With the addition of the mutex, can I get your rb or ab for patch #3 as 
> well? Because then it looks ready for commit.
Feel free to add my RB for that one.

Regards,
David Zhou
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2018-02-01  9:12 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-31 15:47 [PATCH 1/8] drm/amdgpu: make VMID assignment more fair Christian König
     [not found] ` <20180131154756.3374-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-01-31 15:47   ` [PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function Christian König
     [not found]     ` <20180131154756.3374-2-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  5:40       ` Chunming Zhou
2018-01-31 15:47   ` [PATCH 3/8] drm/amdgpu: make VMID owner none atomic Christian König
     [not found]     ` <20180131154756.3374-3-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  5:44       ` Chunming Zhou
     [not found]         ` <bcaaf808-e368-b386-946c-e6cbdaad690b-5C7GfCeVMHo@public.gmane.org>
2018-02-01  8:40           ` Christian König
2018-01-31 15:47   ` [PATCH 4/8] drm/amdgpu: stop checking GPU reset counter during VMID grab Christian König
     [not found]     ` <20180131154756.3374-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  5:48       ` Chunming Zhou
2018-01-31 15:47   ` [PATCH 5/8] drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved Christian König
     [not found]     ` <20180131154756.3374-5-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  6:16       ` Chunming Zhou
2018-01-31 15:47   ` [PATCH 6/8] drm/amdgpu: move reusing VMIDs into separate function Christian König
     [not found]     ` <20180131154756.3374-6-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  6:23       ` Chunming Zhou
2018-01-31 15:47   ` [PATCH 7/8] drm/amdgpu: restructure amdgpu_vmid_grab Christian König
2018-01-31 15:47   ` [PATCH 8/8] drm/amdgpu: cache the fence to wait for a VMID Christian König
     [not found]     ` <20180131154756.3374-8-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-02-01  6:32       ` Chunming Zhou
2018-02-01  4:42   ` [PATCH 1/8] drm/amdgpu: make VMID assignment more fair Chunming Zhou
     [not found]     ` <2e5a2349-5036-dff7-ab06-286bcf9de9b3-5C7GfCeVMHo@public.gmane.org>
2018-02-01  8:36       ` Christian König
     [not found]         ` <d3c6258b-2015-6d11-65cf-06e6449d76c7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-02-01  8:55           ` Chunming Zhou
     [not found]             ` <a8418ef0-6bec-af57-0555-cf48b38fa09a-5C7GfCeVMHo@public.gmane.org>
2018-02-01  9:05               ` Christian König
     [not found]                 ` <d4989e55-7331-9e1b-327b-8cf6789fefc3-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-02-01  9:12                   ` Chunming Zhou
