All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/3] drm/amdgpu: replace TLB seq callback with HW seq
@ 2024-01-31 17:14 Shashank Sharma
  2024-01-31 17:14 ` [PATCH v2 2/3] drm/amdgpu: implement TLB flush fence Shashank Sharma
  2024-01-31 17:14 ` [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush Shashank Sharma
  0 siblings, 2 replies; 5+ messages in thread
From: Shashank Sharma @ 2024-01-31 17:14 UTC (permalink / raw)
  To: amd-gfx
  Cc: Alex Deucher, Felix Kuehling, Christian König, Rajneesh Bhardwaj

From: Christian König <christian.koenig@amd.com>

The callbacks we installed for the SDMA updates were actually pretty
horrible. Since we now have seq64, use that one and HW seq writes
instead.

V2:(Shashank)
 - rebased on amd-drm-staging-next
 - changed amdgpu_seq64_gpu_addr

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c   | 14 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h   |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      | 79 ++++-----------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      | 27 ++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c |  5 ++
 7 files changed, 42 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index 3d0d56087d41..300dc79fa2b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -199,6 +199,20 @@ void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
 		__clear_bit(bit_pos, adev->seq64.used);
 }
 
+/**
+ * amdgpu_seq64_gpu_addr - Calculate GPU addr from va
+ *
+ * @adev: amdgpu_device pointer
+ * @va: virtual address in client address space
+ *
+ * Calculate the GART address for a VA.
+ */
+u64 amdgpu_seq64_gpu_addr(struct amdgpu_device *adev, u64 va)
+{
+	return va - amdgpu_seq64_get_va_base(adev) +
+		amdgpu_bo_gpu_offset(adev->seq64.sbo);
+}
+
 /**
  * amdgpu_seq64_fini - Cleanup seq64 driver
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..63e8ac0a2057 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -43,6 +43,7 @@ void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
 int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		     struct amdgpu_bo_va **bo_va);
 void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
+u64 amdgpu_seq64_gpu_addr(struct amdgpu_device *adev, u64 va);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ed4a8c5d26d7..0960e0a665d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -111,21 +111,6 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };
 
-/**
- * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
- */
-struct amdgpu_vm_tlb_seq_struct {
-	/**
-	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
-	 */
-	struct amdgpu_vm *vm;
-
-	/**
-	 * @cb: callback
-	 */
-	struct dma_fence_cb cb;
-};
-
 /**
  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
  *
@@ -862,23 +847,6 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	return r;
 }
 
-/**
- * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
- * @fence: unused
- * @cb: the callback structure
- *
- * Increments the tlb sequence to make sure that future CS execute a VM flush.
- */
-static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
-				 struct dma_fence_cb *cb)
-{
-	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
-
-	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
-	atomic64_inc(&tlb_cb->vm->tlb_seq);
-	kfree(tlb_cb);
-}
-
 /**
  * amdgpu_vm_update_range - update a range in the vm page table
  *
@@ -911,7 +879,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			   struct dma_fence **fence)
 {
 	struct amdgpu_vm_update_params params;
-	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
 	struct amdgpu_res_cursor cursor;
 	enum amdgpu_sync_mode sync_mode;
 	int r, idx;
@@ -919,12 +886,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
 		return -ENODEV;
 
-	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
-	if (!tlb_cb) {
-		r = -ENOMEM;
-		goto error_unlock;
-	}
-
 	/* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache,
 	 * heavy-weight flush TLB unconditionally.
 	 */
@@ -942,6 +903,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.immediate = immediate;
 	params.pages_addr = pages_addr;
 	params.unlocked = unlocked;
+	params.needs_flush = flush_tlb;
 	params.allow_override = allow_override;
 
 	/* Implicitly sync to command submissions in the same VM before
@@ -955,7 +917,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
 		r = -EBUSY;
-		goto error_free;
+		goto error_unlock;
 	}
 
 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
@@ -968,7 +930,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 	if (r)
-		goto error_free;
+		goto error_unlock;
 
 	amdgpu_res_first(pages_addr ? NULL : res, offset,
 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
@@ -1018,7 +980,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		tmp = start + num_entries;
 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
 		if (r)
-			goto error_free;
+			goto error_unlock;
 
 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
 		start = tmp;
@@ -1026,22 +988,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	r = vm->update_funcs->commit(&params, fence);
 
-	if (flush_tlb || params.table_freed) {
-		tlb_cb->vm = vm;
-		if (fence && *fence &&
-		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
-					   amdgpu_vm_tlb_seq_cb)) {
-			dma_fence_put(vm->last_tlb_flush);
-			vm->last_tlb_flush = dma_fence_get(*fence);
-		} else {
-			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
-		}
-		tlb_cb = NULL;
-	}
-
-error_free:
-	kfree(tlb_cb);
-
 error_unlock:
 	amdgpu_vm_eviction_unlock(vm);
 	drm_dev_exit(idx);
@@ -2260,10 +2206,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
 	INIT_KFIFO(vm->faults);
 
-	r = amdgpu_vm_init_entities(adev, vm);
+	r = amdgpu_seq64_alloc(adev, &vm->tlb_seq_va, &vm->tlb_seq_cpu_addr);
 	if (r)
 		return r;
 
+	r = amdgpu_vm_init_entities(adev, vm);
+	if (r)
+		goto error_free_seq;
+
 	vm->pte_support_ats = false;
 	vm->is_compute_context = false;
 
@@ -2283,7 +2233,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	vm->last_update = dma_fence_get_stub();
 	vm->last_unlocked = dma_fence_get_stub();
-	vm->last_tlb_flush = dma_fence_get_stub();
 	vm->generation = 0;
 
 	mutex_init(&vm->eviction_lock);
@@ -2322,10 +2271,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	amdgpu_bo_unref(&root_bo);
 
 error_free_delayed:
-	dma_fence_put(vm->last_tlb_flush);
 	dma_fence_put(vm->last_unlocked);
 	amdgpu_vm_fini_entities(vm);
 
+error_free_seq:
+	amdgpu_seq64_free(adev, vm->tlb_seq_va);
 	return r;
 }
 
@@ -2441,7 +2391,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
 	struct amdgpu_bo *root;
-	unsigned long flags;
 	int i;
 
 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
@@ -2453,11 +2402,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_vm_set_pasid(adev, vm, 0);
 	dma_fence_wait(vm->last_unlocked, false);
 	dma_fence_put(vm->last_unlocked);
-	dma_fence_wait(vm->last_tlb_flush, false);
-	/* Make sure that all fence callbacks have completed */
-	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
-	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
-	dma_fence_put(vm->last_tlb_flush);
+	amdgpu_seq64_free(adev, vm->tlb_seq_va);
 
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 666698a57192..ac9380afcb69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -247,9 +247,9 @@ struct amdgpu_vm_update_params {
 	unsigned int num_dw_left;
 
 	/**
-	 * @table_freed: return true if page table is freed when updating
+	 * @needs_flush: true whenever we need to invalidate the TLB
 	 */
-	bool table_freed;
+	bool needs_flush;
 
 	/**
 	 * @allow_override: true for memory that is not uncached: allows MTYPE
@@ -328,9 +328,11 @@ struct amdgpu_vm {
 	struct drm_sched_entity	immediate;
 	struct drm_sched_entity	delayed;
 
-	/* Last finished delayed update */
+	/* Sequence number indicating necessary TLB flush */
 	atomic64_t		tlb_seq;
-	struct dma_fence	*last_tlb_flush;
+	uint64_t		tlb_seq_va;
+	uint64_t		*tlb_seq_cpu_addr;
+
 	atomic64_t		kfd_last_flushed_seq;
 
 	/* How many times we had to re-generate the page tables */
@@ -549,22 +551,7 @@ int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
  */
 static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
 {
-	unsigned long flags;
-	spinlock_t *lock;
-
-	/*
-	 * Workaround to stop racing between the fence signaling and handling
-	 * the cb. The lock is static after initially setting it up, just make
-	 * sure that the dma_fence structure isn't freed up.
-	 */
-	rcu_read_lock();
-	lock = vm->last_tlb_flush->lock;
-	rcu_read_unlock();
-
-	spin_lock_irqsave(lock, flags);
-	spin_unlock_irqrestore(lock, flags);
-
-	return atomic64_read(&vm->tlb_seq);
+	return READ_ONCE(*vm->tlb_seq_cpu_addr);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 6e31621452de..0f8fd0acef7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -108,7 +108,8 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
 				struct dma_fence **fence)
 {
-	/* Flush HDP */
+	if (p->needs_flush)
+		*p->vm->tlb_seq_cpu_addr = atomic64_inc_return(&p->vm->tlb_seq);
 	mb();
 	amdgpu_device_flush_hdp(p->adev, NULL);
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a160265ddc07..95dc0afdaffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -1056,7 +1056,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
 			while (cursor.pfn < frag_start) {
 				/* Make sure previous mapping is freed */
 				if (cursor.entry->bo) {
-					params->table_freed = true;
+					params->needs_flush = true;
 					amdgpu_vm_pt_free_dfs(adev, params->vm,
 							      &cursor,
 							      params->unlocked);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 349416e176a1..91cc3121fd4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -126,6 +126,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 
 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
+	if (p->needs_flush) {
+		p->job->uf_addr = amdgpu_seq64_gpu_addr(p->adev,
+							p->vm->tlb_seq_va);
+		p->job->uf_sequence = atomic64_inc_return(&p->vm->tlb_seq);
+	}
 	WARN_ON(ib->length_dw > p->num_dw_left);
 	f = amdgpu_job_submit(p->job);
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v2 2/3] drm/amdgpu: implement TLB flush fence
  2024-01-31 17:14 [PATCH v2 1/3] drm/amdgpu: replace TLB seq callback with HW seq Shashank Sharma
@ 2024-01-31 17:14 ` Shashank Sharma
  2024-01-31 17:14 ` [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush Shashank Sharma
  1 sibling, 0 replies; 5+ messages in thread
From: Shashank Sharma @ 2024-01-31 17:14 UTC (permalink / raw)
  To: amd-gfx
  Cc: Alex Deucher, Felix Kuehling, Shashank Sharma,
	Christian König, Rajneesh Bhardwaj

From: Christian König <christian.koenig@amd.com>

The problem is that when (for example) 4k pages are replaced
with a single 2M page, we need to wait for the change to be flushed
out by invalidating the TLB before the PT can be freed.

Solve this by moving the TLB flush into a DMA-fence object which
can be used to delay the freeing of the PT BOs until it is signaled.

V2: (Shashank)
    - rebase
    - set dma_fence_error only in case of error
    - add tlb_flush fence only when PT/PD BO is locked (Felix)
    - use vm->pasid when f is NULL (Mukul)

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile           |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  10 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h        |   4 +
 .../gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c  | 106 ++++++++++++++++++
 4 files changed, 122 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 4c989da4d2f3..fdbb3d770c7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -70,7 +70,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
 	atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
-	amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \
+	amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \
+	amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
 	amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0960e0a665d3..67c690044b97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -932,6 +932,15 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_unlock;
 
+	/* Prepare a TLB flush fence to be attached to PTs */
+	if (!unlocked && params.needs_flush && vm->is_compute_context) {
+		amdgpu_vm_tlb_fence_create(adev, vm, fence);
+
+		/* Makes sure no PD/PT is freed before the flush */
+		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
+				   DMA_RESV_USAGE_BOOKKEEP);
+	}
+
 	amdgpu_res_first(pages_addr ? NULL : res, offset,
 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
 	while (cursor.remaining) {
@@ -2237,6 +2246,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	mutex_init(&vm->eviction_lock);
 	vm->evicting = false;
+	vm->tlb_fence_context = dma_fence_context_alloc(1);
 
 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
 				false, &root, xcp_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index ac9380afcb69..8e6fd25d07b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -332,6 +332,7 @@ struct amdgpu_vm {
 	atomic64_t		tlb_seq;
 	uint64_t		tlb_seq_va;
 	uint64_t		*tlb_seq_cpu_addr;
+	uint64_t		tlb_fence_context;
 
 	atomic64_t		kfd_last_flushed_seq;
 
@@ -585,5 +586,8 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
 				  uint64_t addr,
 				  uint32_t status,
 				  unsigned int vmhub);
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
+				 struct amdgpu_vm *vm,
+				 struct dma_fence **fence);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
new file mode 100644
index 000000000000..569681badd7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gmc.h"
+
+struct amdgpu_tlb_fence {
+	struct dma_fence	base;
+	struct amdgpu_device	*adev;
+	struct dma_fence	*dependency;
+	struct work_struct	work;
+	spinlock_t		lock;
+	uint16_t		pasid;
+
+};
+
+static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "amdgpu tlb fence";
+}
+
+static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
+{
+	return "amdgpu tlb timeline";
+}
+
+static void amdgpu_tlb_fence_work(struct work_struct *work)
+{
+	struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
+	int r;
+
+	r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
+	if (r) {
+		dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n",
+			f->pasid);
+		dma_fence_set_error(&f->base, r);
+	}
+
+	dma_fence_signal(&f->base);
+	dma_fence_put(&f->base);
+}
+
+static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
+	.use_64bit_seqno = true,
+	.get_driver_name = amdgpu_tlb_fence_get_driver_name,
+	.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
+};
+
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+				struct dma_fence **fence)
+{
+	struct amdgpu_tlb_fence *f;
+
+	f = kmalloc(sizeof(*f), GFP_KERNEL);
+	if (!f) {
+		/*
+		 * We can't fail since the PDEs and PTEs are already updated, so
+		 * just block for the dependency and execute the TLB flush
+		 */
+		if (*fence)
+			dma_fence_wait(*fence, false);
+
+		amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
+		*fence = dma_fence_get_stub();
+		return;
+	}
+
+	f->adev = adev;
+	f->dependency = *fence;
+	f->pasid = vm->pasid;
+	INIT_WORK(&f->work, amdgpu_tlb_fence_work);
+	spin_lock_init(&f->lock);
+
+	dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+		       vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+
+	/* TODO: We probably need a separate wq here */
+	dma_fence_get(&f->base);
+	schedule_work(&f->work);
+
+	*fence = &f->base;
+}
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush
  2024-01-31 17:14 [PATCH v2 1/3] drm/amdgpu: replace TLB seq callback with HW seq Shashank Sharma
  2024-01-31 17:14 ` [PATCH v2 2/3] drm/amdgpu: implement TLB flush fence Shashank Sharma
@ 2024-01-31 17:14 ` Shashank Sharma
  2024-02-01 13:48   ` Christian König
  1 sibling, 1 reply; 5+ messages in thread
From: Shashank Sharma @ 2024-01-31 17:14 UTC (permalink / raw)
  To: amd-gfx
  Cc: Alex Deucher, Felix Kuehling, Rajneesh Bhardwaj,
	Christian König, Shashank Sharma

This patch:
- Attaches the TLB flush fence to the PT objects being freed
- Adds a new ptr in VM to save this last TLB flush fence
- Adds a new lock in VM to prevent out-of-context update of saved
  TLB flush fence
- Adds a new ptr in tlb_flush structure to save vm

The idea is to delay freeing of page table objects until we have the
respective TLB entries flushed.

V2: rebase

Cc: Christian König <Christian.Koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  3 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h        |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c     | 27 +++++++++++++++++++
 .../gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c  | 13 +++++++--
 4 files changed, 45 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 67c690044b97..b0e81c249e3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2245,6 +2245,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->generation = 0;
 
 	mutex_init(&vm->eviction_lock);
+	mutex_init(&vm->tlb_flush_lock);
 	vm->evicting = false;
 	vm->tlb_fence_context = dma_fence_context_alloc(1);
 
@@ -2360,7 +2361,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	dma_fence_put(vm->last_update);
+	dma_fence_put(vm->tlb_fence_last);
 	vm->last_update = dma_fence_get_stub();
+	vm->tlb_fence_last = dma_fence_get_stub();
 	vm->is_compute_context = true;
 
 	/* Free the shadow bo for compute VM */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 8e6fd25d07b7..b05bc586237f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -334,6 +334,10 @@ struct amdgpu_vm {
 	uint64_t		*tlb_seq_cpu_addr;
 	uint64_t		tlb_fence_context;
 
+	/* Ptr and lock to maintain tlb flush sync */
+	struct mutex		tlb_flush_lock;
+	struct dma_fence        *tlb_fence_last;
+
 	atomic64_t		kfd_last_flushed_seq;
 
 	/* How many times we had to re-generate the page tables */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 95dc0afdaffb..f1c4418c4d63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -631,6 +631,18 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
 	return r;
 }
 
+static inline
+void amdgpu_vm_attach_tlb_fence(struct amdgpu_bo *bo, struct dma_fence *fence)
+{
+	if (!bo || !fence)
+		return;
+
+	if (!dma_resv_reserve_fences(bo->tbo.base.resv, 1)) {
+		dma_resv_add_fence(bo->tbo.base.resv, fence,
+				   DMA_RESV_USAGE_BOOKKEEP);
+	}
+}
+
 /**
  * amdgpu_vm_pt_free - free one PD/PT
  *
@@ -638,6 +650,7 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
 {
+	struct amdgpu_vm *vm;
 	struct amdgpu_bo *shadow;
 
 	if (!entry->bo)
@@ -646,9 +659,23 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
 	entry->bo->vm_bo = NULL;
 	shadow = amdgpu_bo_shadowed(entry->bo);
 	if (shadow) {
+		vm = shadow->vm_bo->vm;
+
+		mutex_lock(&vm->tlb_flush_lock);
+		if (vm->tlb_fence_last)
+			amdgpu_vm_attach_tlb_fence(shadow, vm->tlb_fence_last);
+		mutex_unlock(&vm->tlb_flush_lock);
+
 		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
 		amdgpu_bo_unref(&shadow);
 	}
+
+	vm = entry->vm;
+	mutex_lock(&vm->tlb_flush_lock);
+	if (vm->tlb_fence_last)
+		amdgpu_vm_attach_tlb_fence(entry->bo, vm->tlb_fence_last);
+	mutex_unlock(&vm->tlb_flush_lock);
+
 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
 
 	spin_lock(&entry->vm->status_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 569681badd7c..54ec81d30034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -31,6 +31,7 @@
 struct amdgpu_tlb_fence {
 	struct dma_fence	base;
 	struct amdgpu_device	*adev;
+	struct amdgpu_vm	*vm;
 	struct dma_fence	*dependency;
 	struct work_struct	work;
 	spinlock_t		lock;
@@ -51,6 +52,7 @@ static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
 static void amdgpu_tlb_fence_work(struct work_struct *work)
 {
 	struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
+	struct amdgpu_vm *vm = f->vm;
 	int r;
 
 	r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
@@ -62,6 +64,10 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
 
 	dma_fence_signal(&f->base);
 	dma_fence_put(&f->base);
+
+	mutex_lock(&vm->tlb_flush_lock);
+	vm->tlb_fence_last = NULL;
+	mutex_unlock(&vm->tlb_flush_lock);
 }
 
 static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
@@ -92,6 +98,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
 	f->adev = adev;
 	f->dependency = *fence;
 	f->pasid = vm->pasid;
+	f->vm = vm;
 	INIT_WORK(&f->work, amdgpu_tlb_fence_work);
 	spin_lock_init(&f->lock);
 
@@ -99,8 +106,10 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
 		       vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
 
 	/* TODO: We probably need a separate wq here */
-	dma_fence_get(&f->base);
-	schedule_work(&f->work);
+	mutex_lock(&vm->tlb_flush_lock);
+	vm->tlb_fence_last = dma_fence_get(&f->base);
+	mutex_unlock(&vm->tlb_flush_lock);
 
+	schedule_work(&f->work);
 	*fence = &f->base;
 }
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush
  2024-01-31 17:14 ` [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush Shashank Sharma
@ 2024-02-01 13:48   ` Christian König
  2024-02-06 13:52     ` Sharma, Shashank
  0 siblings, 1 reply; 5+ messages in thread
From: Christian König @ 2024-02-01 13:48 UTC (permalink / raw)
  To: Shashank Sharma, amd-gfx; +Cc: Alex Deucher, Felix Kuehling, Rajneesh Bhardwaj



Am 31.01.24 um 18:14 schrieb Shashank Sharma:
> This patch:
> - Attaches the TLB flush fence to the PT objects being freed
> - Adds a new ptr in VM to save this last TLB flush fence
> - Adds a new lock in VM to prevent out-of-context update of saved
>    TLB flush fence
> - Adds a new ptr in tlb_flush structure to save vm
>
> The idea is to delay freeing of page table objects until we have the
> respective TLB entries flushed.
>
> V2: rebase
>
> Cc: Christian König <Christian.Koenig@amd.com>
> Cc: Alex Deucher <alexander.deucher@amd.com>
> Cc: Felix Kuehling <felix.kuehling@amd.com>
> Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
> Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  3 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h        |  4 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c     | 27 +++++++++++++++++++
>   .../gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c  | 13 +++++++--
>   4 files changed, 45 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 67c690044b97..b0e81c249e3a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2245,6 +2245,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	vm->generation = 0;
>   
>   	mutex_init(&vm->eviction_lock);
> +	mutex_init(&vm->tlb_flush_lock);
>   	vm->evicting = false;
>   	vm->tlb_fence_context = dma_fence_context_alloc(1);
>   
> @@ -2360,7 +2361,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>   	}
>   
>   	dma_fence_put(vm->last_update);
> +	dma_fence_put(vm->tlb_fence_last);
>   	vm->last_update = dma_fence_get_stub();
> +	vm->tlb_fence_last = dma_fence_get_stub();
>   	vm->is_compute_context = true;
>   
>   	/* Free the shadow bo for compute VM */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 8e6fd25d07b7..b05bc586237f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -334,6 +334,10 @@ struct amdgpu_vm {
>   	uint64_t		*tlb_seq_cpu_addr;
>   	uint64_t		tlb_fence_context;
>   
> +	/* Ptr and lock to maintain tlb flush sync */
> +	struct mutex		tlb_flush_lock;
> +	struct dma_fence        *tlb_fence_last;
> +
>   	atomic64_t		kfd_last_flushed_seq;
>   
>   	/* How many times we had to re-generate the page tables */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> index 95dc0afdaffb..f1c4418c4d63 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> @@ -631,6 +631,18 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>   	return r;
>   }
>   
> +static inline
> +void amdgpu_vm_attach_tlb_fence(struct amdgpu_bo *bo, struct dma_fence *fence)
> +{
> +	if (!bo || !fence)
> +		return;
> +
> +	if (!dma_resv_reserve_fences(bo->tbo.base.resv, 1)) {
> +		dma_resv_add_fence(bo->tbo.base.resv, fence,
> +				   DMA_RESV_USAGE_BOOKKEEP);
> +	}
> +}
> +
>   /**
>    * amdgpu_vm_pt_free - free one PD/PT
>    *
> @@ -638,6 +650,7 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>    */
>   static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>   {
> +	struct amdgpu_vm *vm;
>   	struct amdgpu_bo *shadow;
>   
>   	if (!entry->bo)
> @@ -646,9 +659,23 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>   	entry->bo->vm_bo = NULL;
>   	shadow = amdgpu_bo_shadowed(entry->bo);
>   	if (shadow) {
> +		vm = shadow->vm_bo->vm;
> +
> +		mutex_lock(&vm->tlb_flush_lock);
> +		if (vm->tlb_fence_last)
> +			amdgpu_vm_attach_tlb_fence(shadow, vm->tlb_fence_last);
> +		mutex_unlock(&vm->tlb_flush_lock);
> +
>   		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
>   		amdgpu_bo_unref(&shadow);
>   	}
> +
> +	vm = entry->vm;
> +	mutex_lock(&vm->tlb_flush_lock);
> +	if (vm->tlb_fence_last)
> +		amdgpu_vm_attach_tlb_fence(entry->bo, vm->tlb_fence_last);
> +	mutex_unlock(&vm->tlb_flush_lock);
> +

That approach doesn't make sense. Instead add the freed PT/PDs to a 
linked list in the parameters structure and only really free them after 
adding the fence to the root PD.


>   	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
>   
>   	spin_lock(&entry->vm->status_lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
> index 569681badd7c..54ec81d30034 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
> @@ -31,6 +31,7 @@
>   struct amdgpu_tlb_fence {
>   	struct dma_fence	base;
>   	struct amdgpu_device	*adev;
> +	struct amdgpu_vm	*vm;

Big NAK to that. The VM might not live long enough to see the end of the 
TLB flush.

Regards,
Christian.

>   	struct dma_fence	*dependency;
>   	struct work_struct	work;
>   	spinlock_t		lock;
> @@ -51,6 +52,7 @@ static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
>   static void amdgpu_tlb_fence_work(struct work_struct *work)
>   {
>   	struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
> +	struct amdgpu_vm *vm = f->vm;
>   	int r;
>   
>   	r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
> @@ -62,6 +64,10 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
>   
>   	dma_fence_signal(&f->base);
>   	dma_fence_put(&f->base);
> +
> +	mutex_lock(&vm->tlb_flush_lock);
> +	vm->tlb_fence_last = NULL;
> +	mutex_unlock(&vm->tlb_flush_lock);
>   }
>   
>   static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
> @@ -92,6 +98,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
>   	f->adev = adev;
>   	f->dependency = *fence;
>   	f->pasid = vm->pasid;
> +	f->vm = vm;
>   	INIT_WORK(&f->work, amdgpu_tlb_fence_work);
>   	spin_lock_init(&f->lock);
>   
> @@ -99,8 +106,10 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
>   		       vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
>   
>   	/* TODO: We probably need a separate wq here */
> -	dma_fence_get(&f->base);
> -	schedule_work(&f->work);
> +	mutex_lock(&vm->tlb_flush_lock);
> +	vm->tlb_fence_last = dma_fence_get(&f->base);
> +	mutex_unlock(&vm->tlb_flush_lock);
>   
> +	schedule_work(&f->work);
>   	*fence = &f->base;
>   }


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush
  2024-02-01 13:48   ` Christian König
@ 2024-02-06 13:52     ` Sharma, Shashank
  0 siblings, 0 replies; 5+ messages in thread
From: Sharma, Shashank @ 2024-02-06 13:52 UTC (permalink / raw)
  To: Christian König, amd-gfx
  Cc: Alex Deucher, Felix Kuehling, Rajneesh Bhardwaj

Hey Christian,

On 01/02/2024 14:48, Christian König wrote:
>
>
> Am 31.01.24 um 18:14 schrieb Shashank Sharma:
>> This patch:
>> - Attaches the TLB flush fence to the PT objects being freed
>> - Adds a new ptr in VM to save this last TLB flush fence
>> - Adds a new lock in VM to prevent out-of-context update of saved
>>    TLB flush fence
>> - Adds a new ptr in tlb_flush structure to save vm
>>
>> The idea is to delay freeing of page table objects until we have the
>> respective TLB entries flushed.
>>
>> V2: rebase
>>
>> Cc: Christian König <Christian.Koenig@amd.com>
>> Cc: Alex Deucher <alexander.deucher@amd.com>
>> Cc: Felix Kuehling <felix.kuehling@amd.com>
>> Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
>> Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  3 +++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h        |  4 +++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c     | 27 +++++++++++++++++++
>>   .../gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c  | 13 +++++++--
>>   4 files changed, 45 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 67c690044b97..b0e81c249e3a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -2245,6 +2245,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, 
>> struct amdgpu_vm *vm,
>>       vm->generation = 0;
>>         mutex_init(&vm->eviction_lock);
>> +    mutex_init(&vm->tlb_flush_lock);
>>       vm->evicting = false;
>>       vm->tlb_fence_context = dma_fence_context_alloc(1);
>>   @@ -2360,7 +2361,9 @@ int amdgpu_vm_make_compute(struct 
>> amdgpu_device *adev, struct amdgpu_vm *vm)
>>       }
>>         dma_fence_put(vm->last_update);
>> +    dma_fence_put(vm->tlb_fence_last);
>>       vm->last_update = dma_fence_get_stub();
>> +    vm->tlb_fence_last = dma_fence_get_stub();
>>       vm->is_compute_context = true;
>>         /* Free the shadow bo for compute VM */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index 8e6fd25d07b7..b05bc586237f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -334,6 +334,10 @@ struct amdgpu_vm {
>>       uint64_t        *tlb_seq_cpu_addr;
>>       uint64_t        tlb_fence_context;
>>   +    /* Ptr and lock to maintain tlb flush sync */
>> +    struct mutex        tlb_flush_lock;
>> +    struct dma_fence        *tlb_fence_last;
>> +
>>       atomic64_t        kfd_last_flushed_seq;
>>         /* How many times we had to re-generate the page tables */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> index 95dc0afdaffb..f1c4418c4d63 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> @@ -631,6 +631,18 @@ static int amdgpu_vm_pt_alloc(struct 
>> amdgpu_device *adev,
>>       return r;
>>   }
>>   +static inline
>> +void amdgpu_vm_attach_tlb_fence(struct amdgpu_bo *bo, struct 
>> dma_fence *fence)
>> +{
>> +    if (!bo || !fence)
>> +        return;
>> +
>> +    if (!dma_resv_reserve_fences(bo->tbo.base.resv, 1)) {
>> +        dma_resv_add_fence(bo->tbo.base.resv, fence,
>> +                   DMA_RESV_USAGE_BOOKKEEP);
>> +    }
>> +}
>> +
>>   /**
>>    * amdgpu_vm_pt_free - free one PD/PT
>>    *
>> @@ -638,6 +650,7 @@ static int amdgpu_vm_pt_alloc(struct 
>> amdgpu_device *adev,
>>    */
>>   static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>   {
>> +    struct amdgpu_vm *vm;
>>       struct amdgpu_bo *shadow;
>>         if (!entry->bo)
>> @@ -646,9 +659,23 @@ static void amdgpu_vm_pt_free(struct 
>> amdgpu_vm_bo_base *entry)
>>       entry->bo->vm_bo = NULL;
>>       shadow = amdgpu_bo_shadowed(entry->bo);
>>       if (shadow) {
>> +        vm = shadow->vm_bo->vm;
>> +
>> +        mutex_lock(&vm->tlb_flush_lock);
>> +        if (vm->tlb_fence_last)
>> +            amdgpu_vm_attach_tlb_fence(shadow, vm->tlb_fence_last);
>> +        mutex_unlock(&vm->tlb_flush_lock);
>> +
>>           ttm_bo_set_bulk_move(&shadow->tbo, NULL);
>>           amdgpu_bo_unref(&shadow);
>>       }
>> +
>> +    vm = entry->vm;
>> +    mutex_lock(&vm->tlb_flush_lock);
>> +    if (vm->tlb_fence_last)
>> +        amdgpu_vm_attach_tlb_fence(entry->bo, vm->tlb_fence_last);
>> +    mutex_unlock(&vm->tlb_flush_lock);
>> +
>
> That approach doesn't make sense. Instead add the freed PT/PDs to a 
> linked list in the parameters structure and only really free them 
> after adding the fence to the root PD.

Sure, I will do those changes.

Just out of curiosity, why wouldn't this approach work? Wouldn't this 
delay the actual freeing of the buffers in TTM until the fence signals?

- Shashank

>
>
>> ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
>>         spin_lock(&entry->vm->status_lock);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
>> index 569681badd7c..54ec81d30034 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
>> @@ -31,6 +31,7 @@
>>   struct amdgpu_tlb_fence {
>>       struct dma_fence    base;
>>       struct amdgpu_device    *adev;
>> +    struct amdgpu_vm    *vm;
>
> Big NAK to that. The VM might not live long enough to see the end of 
> the TLB flush.
>
> Regards,
> Christian.
>
>>       struct dma_fence    *dependency;
>>       struct work_struct    work;
>>       spinlock_t        lock;
>> @@ -51,6 +52,7 @@ static const char 
>> *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
>>   static void amdgpu_tlb_fence_work(struct work_struct *work)
>>   {
>>       struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
>> +    struct amdgpu_vm *vm = f->vm;
>>       int r;
>>         r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, 
>> true, 0);
>> @@ -62,6 +64,10 @@ static void amdgpu_tlb_fence_work(struct 
>> work_struct *work)
>>         dma_fence_signal(&f->base);
>>       dma_fence_put(&f->base);
>> +
>> +    mutex_lock(&vm->tlb_flush_lock);
>> +    vm->tlb_fence_last = NULL;
>> +    mutex_unlock(&vm->tlb_flush_lock);
>>   }
>>     static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
>> @@ -92,6 +98,7 @@ void amdgpu_vm_tlb_fence_create(struct 
>> amdgpu_device *adev, struct amdgpu_vm *vm
>>       f->adev = adev;
>>       f->dependency = *fence;
>>       f->pasid = vm->pasid;
>> +    f->vm = vm;
>>       INIT_WORK(&f->work, amdgpu_tlb_fence_work);
>>       spin_lock_init(&f->lock);
>>   @@ -99,8 +106,10 @@ void amdgpu_vm_tlb_fence_create(struct 
>> amdgpu_device *adev, struct amdgpu_vm *vm
>>                  vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
>>         /* TODO: We probably need a separate wq here */
>> -    dma_fence_get(&f->base);
>> -    schedule_work(&f->work);
>> +    mutex_lock(&vm->tlb_flush_lock);
>> +    vm->tlb_fence_last = dma_fence_get(&f->base);
>> +    mutex_unlock(&vm->tlb_flush_lock);
>>   +    schedule_work(&f->work);
>>       *fence = &f->base;
>>   }
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2024-02-06 13:52 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-01-31 17:14 [PATCH v2 1/3] drm/amdgpu: replace TLB seq callback with HW seq Shashank Sharma
2024-01-31 17:14 ` [PATCH v2 2/3] drm/amdgpu: implement TLB flush fence Shashank Sharma
2024-01-31 17:14 ` [PATCH v2 3/3] drm/amdgpu: sync page table freeing with tlb flush Shashank Sharma
2024-02-01 13:48   ` Christian König
2024-02-06 13:52     ` Sharma, Shashank

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.