* [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c
@ 2019-12-04 15:38 Christian König
  2019-12-04 15:38 ` [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates Christian König
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Christian König @ 2019-12-04 15:38 UTC (permalink / raw)
  To: amd-gfx, felix.kuehling, philip.yang

When a page table needs to be evicted, the VM code should
decide whether that is possible.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  5 +----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 22 ++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  1 +
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 19ffe00d9072..81f6764f1ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1489,11 +1489,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	struct dma_fence *f;
 	int i;
 
-	/* Don't evict VM page tables while they are busy, otherwise we can't
-	 * cleanly handle page faults.
-	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
+	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a94c4faa5af1..a22bd57129d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2503,6 +2503,28 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	kfree(bo_va);
 }
 
+/**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+	/* Page tables of a destroyed VM can go away immediately */
+	if (!bo_base || !bo_base->vm)
+		return true;
+
+	/* Don't evict VM page tables while they are busy */
+	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+		return false;
+
+	return true;
+}
+
 /**
  * amdgpu_vm_bo_invalidate - mark the bo as invalid
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 76fcf853035c..db561765453b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -381,6 +381,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
-- 
2.17.1
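
A condensed sketch of the resulting check, for readers skimming the
series (not part of the patch; dma_resv_test_signaled_rcu() with
test_all=true only returns true once every fence on the reservation
object, shared ones included, has signaled):

	/* Sketch: the eviction decision for a page table BO after this
	 * patch. TTM's eviction_valuable() hook now delegates kernel
	 * BOs (page tables) to amdgpu_vm_evictable().
	 */
	bool pt_evictable_sketch(struct amdgpu_bo *bo)
	{
		struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;

		/* page tables of a destroyed VM can go right away */
		if (!bo_base || !bo_base->vm)
			return true;

		/* otherwise only evict once all fences have signaled */
		return dma_resv_test_signaled_rcu(bo->tbo.base.resv, true);
	}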


* [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates
  2019-12-04 15:38 [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c Christian König
@ 2019-12-04 15:38 ` Christian König
  2019-12-04 16:23   ` Zeng, Oak
  2019-12-04 23:05   ` Felix Kuehling
  2019-12-04 15:38 ` [PATCH 3/3] drm/amdgpu: stop adding VM updates fences to the resv obj Christian König
  2019-12-05  1:31 ` [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c Felix Kuehling
  2 siblings, 2 replies; 7+ messages in thread
From: Christian König @ 2019-12-04 15:38 UTC (permalink / raw)
  To: amd-gfx, felix.kuehling, philip.yang

This allows us to reduce the overhead of syncing to fences a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   | 18 +++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 22 +++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h |  2 ++
 3 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9e0c99760367..f21475352b88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -799,29 +799,25 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->job->sync,
-			      fpriv->prt_va->last_pt_update, false);
+	r = amdgpu_sync_vm_fence(adev, &p->job->sync,
+				 fpriv->prt_va->last_pt_update);
 	if (r)
 		return r;
 
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
-		struct dma_fence *f;
-
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
-		f = bo_va->last_pt_update;
-		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+		r = amdgpu_sync_vm_fence(adev, &p->job->sync,
+					 bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		struct dma_fence *f;
-
 		/* ignore duplicates */
 		bo = ttm_to_amdgpu_bo(e->tv.bo);
 		if (!bo)
@@ -835,8 +831,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;
 
-		f = bo_va->last_pt_update;
-		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+		r = amdgpu_sync_vm_fence(adev, &p->job->sync,
+					 bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -849,7 +845,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
+	r = amdgpu_sync_vm_fence(adev, &p->job->sync, vm->last_update);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 95e5e93edd18..9b28c1eb5f49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -161,9 +161,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 
 	if (!f)
 		return 0;
-	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
-		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
 	if (amdgpu_sync_add_later(sync, f, explicit))
 		return 0;
@@ -179,6 +176,25 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
+/**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @adev: amdgpu device
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+			 struct dma_fence *fence)
+{
+	if (!fence)
+		return 0;
+
+	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+	return amdgpu_sync_fence(adev, sync, fence, false);
+}
+
 /**
  * amdgpu_sync_resv - sync to a reservation object
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index b5f1778a2319..ac210dd34371 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -42,6 +42,8 @@ struct amdgpu_sync {
 void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		      struct dma_fence *f, bool explicit);
+int amdgpu_sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+			 struct dma_fence *fence);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct dma_resv *resv,
-- 
2.17.1
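
amdgpu_sync_vm_fence() leans on amdgpu_sync_keep_later() retaining
whichever fence is newer. Roughly the following semantics (a sketch of
the idiom, not copied from the driver; dma_fence_is_later() assumes
both fences come from the same context):

	/* Sketch: keep the later of two fences in *keep. */
	static void keep_later_sketch(struct dma_fence **keep,
				      struct dma_fence *fence)
	{
		if (*keep && dma_fence_is_later(*keep, fence))
			return;			/* stored fence is newer */

		dma_fence_put(*keep);		/* drop the old reference */
		*keep = dma_fence_get(fence);	/* reference the new fence */
	}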


* [PATCH 3/3] drm/amdgpu: stop adding VM updates fences to the resv obj
  2019-12-04 15:38 [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c Christian König
  2019-12-04 15:38 ` [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates Christian König
@ 2019-12-04 15:38 ` Christian König
  2019-12-05  0:15   ` Felix Kuehling
  2019-12-05  1:31 ` [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c Felix Kuehling
  2 siblings, 1 reply; 7+ messages in thread
From: Christian König @ 2019-12-04 15:38 UTC (permalink / raw)
  To: amd-gfx, felix.kuehling, philip.yang

This way we can do updates even without the resv obj locked.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c    |  6 ++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      | 30 ++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 +++++---
 4 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9b28c1eb5f49..7f17c06b8a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -241,10 +241,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 			/* VM updates are only interesting
 			 * for other VM updates and moves.
 			 */
-			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
-			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+			if (owner == AMDGPU_FENCE_OWNER_VM &&
+			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 				continue;
 
 			/* Ignore fence from the same owner and explicit one as
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a22bd57129d1..0d700e8154c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for the VM updates, one for TTM and one for the CS job */
-	entry->tv.num_shared = 3;
+	/* One for TTM and one for the CS job */
+	entry->tv.num_shared = 2;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -2522,6 +2522,11 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
 		return false;
 
+	/* Don't evict VM page tables while they are updated */
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+	    !dma_fence_is_signaled(bo_base->vm->last_delayed))
+		return false;
+
 	return true;
 }
 
@@ -2687,8 +2692,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-						   true, true, timeout);
+	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+					    true, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
 }
 
 /**
@@ -2757,6 +2770,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
+	vm->last_direct = dma_fence_get_stub();
+	vm->last_delayed = dma_fence_get_stub();
 
 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2807,6 +2822,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
+	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3007,6 +3024,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
+	dma_fence_wait(vm->last_direct, false);
+	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_delayed, false);
+	dma_fence_put(vm->last_delayed);
+
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index db561765453b..d93ea9ad879e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -269,6 +269,10 @@ struct amdgpu_vm {
 	struct drm_sched_entity	direct;
 	struct drm_sched_entity	delayed;
 
+	/* Last submission to the scheduler entities */
+	struct dma_fence	*last_direct;
+	struct dma_fence	*last_delayed;
+
 	unsigned int		pasid;
 	/* dedicated to vm */
 	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 832db59f441e..04e79c75c87e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	struct amdgpu_ib *ib = p->job->ibs;
 	struct drm_sched_entity *entity;
+	struct dma_fence *f, *tmp;
 	struct amdgpu_ring *ring;
-	struct dma_fence *f;
 	int r;
 
 	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
@@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	amdgpu_bo_fence(root, f, true);
+	tmp = dma_fence_get(f);
+	if (p->direct)
+		swap(p->vm->last_direct, tmp);
+	else
+		swap(p->vm->last_delayed, tmp);
+	dma_fence_put(tmp);
+
 	if (fence && !p->direct)
 		swap(*fence, f);
 	dma_fence_put(f);
-- 
2.17.1
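
The commit path above uses a compact reference-counting idiom when it
publishes the fence in vm->last_direct/last_delayed. Spelled out as a
sketch of the pattern (not driver code):

	/* Sketch: store the newest fence in a slot while releasing the
	 * previously stored one.
	 */
	static void publish_fence_sketch(struct dma_fence **slot,
					 struct dma_fence *f)
	{
		struct dma_fence *old = dma_fence_get(f); /* +1 for the slot */

		swap(*slot, old);	/* *slot = f, old = previous fence */
		dma_fence_put(old);	/* drop reference to the old fence */
	}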


* RE: [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates
  2019-12-04 15:38 ` [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates Christian König
@ 2019-12-04 16:23   ` Zeng, Oak
  2019-12-04 23:05   ` Felix Kuehling
  1 sibling, 0 replies; 7+ messages in thread
From: Zeng, Oak @ 2019-12-04 16:23 UTC (permalink / raw)
  To: Christian König, amd-gfx, Kuehling, Felix, Yang, Philip

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Wednesday, December 4, 2019 10:38 AM
To: amd-gfx@lists.freedesktop.org; Kuehling, Felix <Felix.Kuehling@amd.com>; Yang, Philip <Philip.Yang@amd.com>
Subject: [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates

This allows us to reduce the overhead of syncing to fences a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
[snip: quoted amdgpu_cs.c changes, unchanged from the patch above]
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 95e5e93edd18..9b28c1eb5f49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -161,9 +161,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 
 	if (!f)
 		return 0;
-	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
-		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
 	if (amdgpu_sync_add_later(sync, f, explicit))
 		return 0;
@@ -179,6 +176,25 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
+/**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @adev: amdgpu device
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+			 struct dma_fence *fence)
+{
+	if (!fence)
+		return 0;
+
+	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+	return amdgpu_sync_fence(adev, sync, fence, false);
+}

Is there an actual functional change here? I see you reorganized the code
a little by introducing the amdgpu_sync_vm_fence() function, but when you
add the fence to the sync object, the explicit parameter is still false,
the same as before. From the title of the change - explicitly sync to VM
updates - did you mean "true" here for the last parameter?

* Re: [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates
  2019-12-04 15:38 ` [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates Christian König
  2019-12-04 16:23   ` Zeng, Oak
@ 2019-12-04 23:05   ` Felix Kuehling
  1 sibling, 0 replies; 7+ messages in thread
From: Felix Kuehling @ 2019-12-04 23:05 UTC (permalink / raw)
  To: Christian König, amd-gfx, philip.yang

On 2019-12-04 10:38 a.m., Christian König wrote:
> This allows us to reduce the overhead of syncing to fences a bit.

This allows some further simplification. See two comments inline.


>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   | 18 +++++++-----------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 22 +++++++++++++++++++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h |  2 ++
>   3 files changed, 28 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 9e0c99760367..f21475352b88 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -799,29 +799,25 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
>   	if (r)
>   		return r;
>   
> -	r = amdgpu_sync_fence(adev, &p->job->sync,
> -			      fpriv->prt_va->last_pt_update, false);
> +	r = amdgpu_sync_vm_fence(adev, &p->job->sync,
> +				 fpriv->prt_va->last_pt_update);
>   	if (r)
>   		return r;
>   
>   	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
> -		struct dma_fence *f;
> -
>   		bo_va = fpriv->csa_va;
>   		BUG_ON(!bo_va);
>   		r = amdgpu_vm_bo_update(adev, bo_va, false);
>   		if (r)
>   			return r;
>   
> -		f = bo_va->last_pt_update;
> -		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
> +		r = amdgpu_sync_vm_fence(adev, &p->job->sync,
> +					 bo_va->last_pt_update);
>   		if (r)
>   			return r;
>   	}
>   
>   	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
> -		struct dma_fence *f;
> -
>   		/* ignore duplicates */
>   		bo = ttm_to_amdgpu_bo(e->tv.bo);
>   		if (!bo)
> @@ -835,8 +831,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
>   		if (r)
>   			return r;
>   
> -		f = bo_va->last_pt_update;
> -		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
> +		r = amdgpu_sync_vm_fence(adev, &p->job->sync,
> +					 bo_va->last_pt_update);
>   		if (r)
>   			return r;
>   	}
> @@ -849,7 +845,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
>   	if (r)
>   		return r;
>   
> -	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
> +	r = amdgpu_sync_vm_fence(adev, &p->job->sync, vm->last_update);
>   	if (r)
>   		return r;
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index 95e5e93edd18..9b28c1eb5f49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -161,9 +161,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>   
>   	if (!f)
>   		return 0;
> -	if (amdgpu_sync_same_dev(adev, f) &&
> -	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
> -		amdgpu_sync_keep_later(&sync->last_vm_update, f);

If you remove this, you can remove the adev parameter from this function.
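
The simplified prototypes would then look roughly like this (untested,
just to illustrate the suggestion):

	int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
			      bool explicit);
	int amdgpu_sync_vm_fence(struct amdgpu_sync *sync,
				 struct dma_fence *fence);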


>   
>   	if (amdgpu_sync_add_later(sync, f, explicit))
>   		return 0;
> @@ -179,6 +176,25 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>   	return 0;
>   }
>   
> +/**
> + * amdgpu_sync_vm_fence - remember to sync to this VM fence
> + *
> + * @adev: amdgpu device
> + * @sync: sync object to add fence to
> + * @fence: the VM fence to add
> + *
> + * Add the fence to the sync object and remember it as VM update.
> + */
> +int amdgpu_sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
> +			 struct dma_fence *fence)
> +{
> +	if (!fence)
> +		return 0;
> +
> +	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
> +	return amdgpu_sync_fence(adev, sync, fence, false);

Looks like you don't need adev here either, because you don't have the 
amdgpu_sync_same_dev condition any more.

Regards,
   Felix

> +}
> +
>   /**
>    * amdgpu_sync_resv - sync to a reservation object
>    *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
> index b5f1778a2319..ac210dd34371 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
> @@ -42,6 +42,8 @@ struct amdgpu_sync {
>   void amdgpu_sync_create(struct amdgpu_sync *sync);
>   int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
>   		      struct dma_fence *f, bool explicit);
> +int amdgpu_sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
> +			 struct dma_fence *fence);
>   int amdgpu_sync_resv(struct amdgpu_device *adev,
>   		     struct amdgpu_sync *sync,
>   		     struct dma_resv *resv,

* Re: [PATCH 3/3] drm/amdgpu: stop adding VM updates fences to the resv obj
  2019-12-04 15:38 ` [PATCH 3/3] drm/amdgpu: stop adding VM updates fences to the resv obj Christian König
@ 2019-12-05  0:15   ` Felix Kuehling
  0 siblings, 0 replies; 7+ messages in thread
From: Felix Kuehling @ 2019-12-05  0:15 UTC (permalink / raw)
  To: Christian König, amd-gfx, philip.yang, Sierra Guiza,
	Alejandro (Alex)

[+Alejandro]

On 2019-12-04 10:38 a.m., Christian König wrote:
> This way we can do updates even without the resv obj locked.

This could use a bit more explanation. This change depends on the
previous one, which adds explicit synchronization with page table
updates during command submission in amdgpu_cs.c. You're adding two
fences to the VM for the last direct and delayed page table updates;
they are used to prevent eviction of busy page tables (which no longer
carry the update fence in their resv) and to implement
amdgpu_vm_wait_idle.

Beyond this patch, updating page tables without the resv locked still
needs safeguards to ensure the page tables stay resident while the
update is being prepared. That will come in Alejandro's changes for
invalidating PTEs in MMU notifiers.
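
Put differently, evicting a page table now has to pass two checks.
Roughly (my summary of the combined logic from patches 1 and 3, not a
quote from the code):

	/* a page table is evictable only if everything on its resv has
	 * signaled AND the VM's last direct and delayed submissions
	 * have signaled, since update fences no longer land in the resv
	 */
	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true) ||
	    !dma_fence_is_signaled(bo_base->vm->last_direct) ||
	    !dma_fence_is_signaled(bo_base->vm->last_delayed))
		return false;	/* still busy, keep it resident */
	return true;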


>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c    |  6 ++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      | 30 ++++++++++++++++++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      |  4 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 +++++---
>   4 files changed, 40 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> index 9b28c1eb5f49..7f17c06b8a3f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
> @@ -241,10 +241,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
>   			/* VM updates are only interesting
>   			 * for other VM updates and moves.
>   			 */
> -			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
> -			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
> -			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
> -			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
> +			if (owner == AMDGPU_FENCE_OWNER_VM &&
> +			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)

I don't really understand this condition now. Why does this function 
need to change at all? It doesn't add fences to resvs; it adds fences 
from a resv to a sync_obj. Is this just a simplification because you 
assume that VM fences can no longer be found in resvs? Would it be 
worth adding a WARN_ONCE to check that assumption?

Maybe the comment above should also be updated. I think what this 
condition now states is something like /* VM updates don't wait for user 
mode fences. */
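
Spelling my reading of the new condition out (worth double-checking):

	owner    fence_owner   action
	------   -----------   ------------------------------------------
	VM       UNDEFINED     wait (e.g. TTM moves must complete first)
	VM       VM            skip (VM updates now sync explicitly)
	VM       user CS       skip (don't wait for user command fences)
	non-VM   any           fall through to the checks further down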


>   				continue;
>   
>   			/* Ignore fence from the same owner and explicit one as
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index a22bd57129d1..0d700e8154c4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
>   {
>   	entry->priority = 0;
>   	entry->tv.bo = &vm->root.base.bo->tbo;
> -	/* One for the VM updates, one for TTM and one for the CS job */
> -	entry->tv.num_shared = 3;
> +	/* One for TTM and one for the CS job */
> +	entry->tv.num_shared = 2;
>   	entry->user_pages = NULL;
>   	list_add(&entry->tv.head, validated);
>   }
> @@ -2522,6 +2522,11 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
>   	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
>   		return false;
>   
> +	/* Don't evict VM page tables while they are updated */
> +	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
> +	    !dma_fence_is_signaled(bo_base->vm->last_delayed))
> +		return false;
> +
>   	return true;
>   }
>   
> @@ -2687,8 +2692,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
>    */
>   long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
>   {
> -	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
> -						   true, true, timeout);
> +	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
> +					    true, true, timeout);
> +	if (timeout <= 0)
> +		return timeout;
> +
> +	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
> +	if (timeout <= 0)
> +		return timeout;
> +
> +	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
>   }
>   
>   /**
> @@ -2757,6 +2770,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	else
>   		vm->update_funcs = &amdgpu_vm_sdma_funcs;
>   	vm->last_update = NULL;
> +	vm->last_direct = dma_fence_get_stub();
> +	vm->last_delayed = dma_fence_get_stub();
>   
>   	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
>   	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
> @@ -2807,6 +2822,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	vm->root.base.bo = NULL;
>   
>   error_free_delayed:
> +	dma_fence_put(vm->last_direct);
> +	dma_fence_put(vm->last_delayed);
>   	drm_sched_entity_destroy(&vm->delayed);
>   
>   error_free_direct:
> @@ -3007,6 +3024,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>   		vm->pasid = 0;
>   	}
>   
> +	dma_fence_wait(vm->last_direct, false);
> +	dma_fence_put(vm->last_direct);
> +	dma_fence_wait(vm->last_delayed, false);
> +	dma_fence_put(vm->last_delayed);
> +
>   	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
>   		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
>   			amdgpu_vm_prt_fini(adev, vm);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index db561765453b..d93ea9ad879e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -269,6 +269,10 @@ struct amdgpu_vm {
>   	struct drm_sched_entity	direct;
>   	struct drm_sched_entity	delayed;
>   
> +	/* Last submission to the scheduler entities */
> +	struct dma_fence	*last_direct;
> +	struct dma_fence	*last_delayed;
> +
>   	unsigned int		pasid;
>   	/* dedicated to vm */
>   	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> index 832db59f441e..04e79c75c87e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> @@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
>   static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
>   				 struct dma_fence **fence)
>   {
> -	struct amdgpu_bo *root = p->vm->root.base.bo;
>   	struct amdgpu_ib *ib = p->job->ibs;
>   	struct drm_sched_entity *entity;
> +	struct dma_fence *f, *tmp;
>   	struct amdgpu_ring *ring;
> -	struct dma_fence *f;
>   	int r;
>   
>   	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
> @@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
>   	if (r)
>   		goto error;
>   
> -	amdgpu_bo_fence(root, f, true);

This seems to be the change advertised in the headline: the commit no 
longer adds the shared VM fence to the PD resv. The rest of this patch 
I had to explain to myself as I reviewed it. Do my comments above sound 
about right? A better patch description would have saved me about half 
an hour of head-scratching.

Thanks,
   Felix

> +	tmp = dma_fence_get(f);
> +	if (p->direct)
> +		swap(p->vm->last_direct, tmp);
> +	else
> +		swap(p->vm->last_delayed, tmp);
> +	dma_fence_put(tmp);
> +
>   	if (fence && !p->direct)
>   		swap(*fence, f);
>   	dma_fence_put(f);

* Re: [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c
  2019-12-04 15:38 [PATCH 1/3] drm/amdgpu: move VM eviction decision into amdgpu_vm.c Christian König
  2019-12-04 15:38 ` [PATCH 2/3] drm/amdgpu: explicitly sync to VM updates Christian König
  2019-12-04 15:38 ` [PATCH 3/3] drm/amdgpu: stop adding VM updates fences to the resv obj Christian König
@ 2019-12-05  1:31 ` Felix Kuehling
  2 siblings, 0 replies; 7+ messages in thread
From: Felix Kuehling @ 2019-12-05  1:31 UTC (permalink / raw)
  To: Christian König, amd-gfx, philip.yang

On 2019-12-04 10:38 a.m., Christian König wrote:
> When a page table needs to be evicted, the VM code should
> decide whether that is possible.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  5 +----
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 22 ++++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  1 +
>   3 files changed, 24 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 19ffe00d9072..81f6764f1ba6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1489,11 +1489,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   	struct dma_fence *f;
>   	int i;
>   
> -	/* Don't evict VM page tables while they are busy, otherwise we can't
> -	 * cleanly handle page faults.
> -	 */
>   	if (bo->type == ttm_bo_type_kernel &&
> -	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
> +	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
>   		return false;
>   
>   	/* If bo is a KFD BO, check if the bo belongs to the current process.
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index a94c4faa5af1..a22bd57129d1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2503,6 +2503,28 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
>   	kfree(bo_va);
>   }
>   
> +/**
> + * amdgpu_vm_evictable - check if we can evict a VM
> + *
> + * @bo: A page table of the VM.
> + *
> + * Check if it is possible to evict a VM.
> + */
> +bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
> +{
> +	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
> +
> +	/* Page tables of a destroyed VM can go away immediately */
> +	if (!bo_base || !bo_base->vm)
> +		return true;
> +
> +	/* Don't evict VM page tables while they are busy */
> +	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
> +		return false;
> +
> +	return true;
> +}
> +
>   /**
>    * amdgpu_vm_bo_invalidate - mark the bo as invalid
>    *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 76fcf853035c..db561765453b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -381,6 +381,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
>   int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>   			struct amdgpu_bo_va *bo_va,
>   			bool clear);
> +bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
>   void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
>   			     struct amdgpu_bo *bo, bool evicted);
>   uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);