All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] drm/amdgpu: move some functions into amdgpu_ttm.h
@ 2018-03-01 10:22 Christian König
       [not found] ` <20180301102244.1684-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2018-03-01 10:22 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk.Liu-5C7GfCeVMHo

Those belong to the TTM handling.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     | 3 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++++
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5bddfc1c0cb3..79fe13c125b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1834,9 +1834,6 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
 				 struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 *registers,
 					     const u32 array_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d31491069f2f..0fba23c69e97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -75,6 +75,10 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
+void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-- 
2.14.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/4] drm/amdgpu: change amdgpu_ttm_set_active_vram_size
       [not found] ` <20180301102244.1684-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-03-01 10:22   ` Christian König
  2018-03-01 10:22   ` [PATCH 3/4] drm/amdgpu: ignore changes of buffer function status because of GPU resets Christian König
  2018-03-01 10:22   ` [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability Christian König
  2 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2018-03-01 10:22 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk.Liu-5C7GfCeVMHo

Instead of setting the active VRAM size directly, provide the info on
whether we can use the buffer functions or not.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 +++++++++++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  3 ++-
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/si_dma.c     |  4 ++--
 7 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f126a5ae41b3..46d7a690a287 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1411,7 +1411,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		adev->gmc.visible_vram_size = vis_vram_limit;
 
 	/* Change the size here instead of the init above so only lpfn is affected */
-	amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+	amdgpu_ttm_set_buffer_funcs_status(adev, false);
 #ifdef CONFIG_64BIT
 	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
 						adev->gmc.visible_vram_size);
@@ -1526,17 +1526,28 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: ttm finalized\n");
 }
 
-/* this should only be called at bootup or when userspace
- * isn't running */
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
+/**
+ * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true when we can use buffer functions.
+ *
+ * Enable/disable use of buffer functions during suspend/resume. This should
+ * only be called at bootup or when userspace isn't running.
+ */
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
-	struct ttm_mem_type_manager *man;
+	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+	uint64_t size;
 
 	if (!adev->mman.initialized)
 		return;
 
-	man = &adev->mman.bdev.man[TTM_PL_VRAM];
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
+	if (enable)
+		size = adev->gmc.real_vram_size;
+	else
+		size = adev->gmc.visible_vram_size;
 	man->size = size >> PAGE_SHIFT;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0fba23c69e97..b8117c6e51f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -77,7 +77,8 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+					bool enable);
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 69568cd1bb99..f48ea0dad875 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -310,7 +310,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 
 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -510,7 +510,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6ccc9d43a7b8..6452101c7aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -339,7 +339,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 
 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -484,7 +484,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 0c2b12ec0e9f..9c4efd4effc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -510,7 +510,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 
 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -750,7 +750,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 87c01d958703..215743df0957 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -426,7 +426,7 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 
 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+		amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -668,7 +668,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index acbf5afa4f38..b75d901ba3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -121,7 +121,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 		ring->ready = false;
 	}
 }
@@ -184,7 +184,7 @@ static int si_dma_start(struct amdgpu_device *adev)
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
+			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
-- 
2.14.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/4] drm/amdgpu: ignore changes of buffer function status because of GPU resets
       [not found] ` <20180301102244.1684-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-03-01 10:22   ` [PATCH 2/4] drm/amdgpu: change amdgpu_ttm_set_active_vram_size Christian König
@ 2018-03-01 10:22   ` Christian König
  2018-03-01 10:22   ` [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability Christian König
  2 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2018-03-01 10:22 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk.Liu-5C7GfCeVMHo

When we reset the GPU we also disable/enable the SDMA, but we don't want
to change TTM's idea of the VRAM size in the middle of that.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 46d7a690a287..2aa6823ef503 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1540,7 +1540,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
 	uint64_t size;
 
-	if (!adev->mman.initialized)
+	if (!adev->mman.initialized || adev->in_gpu_reset)
 		return;
 
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
-- 
2.14.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
       [not found] ` <20180301102244.1684-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2018-03-01 10:22   ` [PATCH 2/4] drm/amdgpu: change amdgpu_ttm_set_active_vram_size Christian König
  2018-03-01 10:22   ` [PATCH 3/4] drm/amdgpu: ignore changes of buffer function status because of GPU resets Christian König
@ 2018-03-01 10:22   ` Christian König
       [not found]     ` <20180301102244.1684-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
  2 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2018-03-01 10:22 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk.Liu-5C7GfCeVMHo

The ring status can change during GPU reset, but we still need to be
able to schedule TTM buffer moves in the meantime.

Otherwise we can run into problems because of aborted move/fill
operations during GPU resets.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2aa6823ef503..53b34b3b8232 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = ttm_to_amdgpu_bo(bo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (adev->mman.buffer_funcs &&
-		    adev->mman.buffer_funcs_ring &&
-		    adev->mman.buffer_funcs_ring->ready == false) {
+		if (!adev->mman.buffer_funcs_enabled) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
@@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
 					AMDGPU_GPU_PAGE_SIZE);
 
-	if (!ring->ready) {
+	if (!adev->mman.buffer_funcs_enabled) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
@@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 		amdgpu_move_null(bo, new_mem);
 		return 0;
 	}
-	if (adev->mman.buffer_funcs == NULL ||
-	    adev->mman.buffer_funcs_ring == NULL ||
-	    !adev->mman.buffer_funcs_ring->ready) {
-		/* use memcpy */
+
+	if (!adev->mman.buffer_funcs_enabled)
 		goto memcpy;
-	}
 
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
@@ -1549,6 +1544,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	else
 		size = adev->gmc.visible_vram_size;
 	man->size = size >> PAGE_SHIFT;
+	adev->mman.buffer_funcs_enabled = enable;
 }
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 	if (direct_submit) {
+		WARN_ON(!ring->ready);
 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 				       NULL, fence);
 		job->fence = dma_fence_get(*fence);
@@ -1720,7 +1717,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 	struct amdgpu_job *job;
 	int r;
 
-	if (!ring->ready) {
+	if (!adev->mman.buffer_funcs_enabled) {
 		DRM_ERROR("Trying to clear memory with ring turned off.\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b8117c6e51f1..6ea7de863041 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -53,6 +53,7 @@ struct amdgpu_mman {
 	/* buffer handling */
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
+	bool					buffer_funcs_enabled;
 
 	struct mutex				gtt_window_lock;
 	/* Scheduler entity for buffer moves */
-- 
2.14.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* RE: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
       [not found]     ` <20180301102244.1684-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
@ 2018-03-01 10:33       ` Liu, Monk
       [not found]         ` <BLUPR12MB0449AAD08D9D289E2B67D5FD84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Liu, Monk @ 2018-03-01 10:33 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 	if (direct_submit) {
+		WARN_ON(!ring->ready);
 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 				       NULL, fence);
 		job->fence = dma_fence_get(*fence);

[ML] in direct_submit case if ring->ready is false why we continue and only give a warning on that ? shouldn't we just abort or use scheduler way ??


/Monk


-----Original Message-----
From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com] 
Sent: 2018年3月1日 18:23
To: amd-gfx@lists.freedesktop.org
Cc: Liu, Monk <Monk.Liu@amd.com>
Subject: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability

The ring status can change during GPU reset, but we still need to be able to schedule TTM buffer moves in the meantime.

Otherwise we can ran into problems because of aborted move/fill operations during GPU resets.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++----------  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2aa6823ef503..53b34b3b8232 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = ttm_to_amdgpu_bo(bo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (adev->mman.buffer_funcs &&
-		    adev->mman.buffer_funcs_ring &&
-		    adev->mman.buffer_funcs_ring->ready == false) {
+		if (!adev->mman.buffer_funcs_enabled) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { @@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
 					AMDGPU_GPU_PAGE_SIZE);
 
-	if (!ring->ready) {
+	if (!adev->mman.buffer_funcs_enabled) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
@@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 		amdgpu_move_null(bo, new_mem);
 		return 0;
 	}
-	if (adev->mman.buffer_funcs == NULL ||
-	    adev->mman.buffer_funcs_ring == NULL ||
-	    !adev->mman.buffer_funcs_ring->ready) {
-		/* use memcpy */
+
+	if (!adev->mman.buffer_funcs_enabled)
 		goto memcpy;
-	}
 
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) { @@ -1549,6 +1544,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	else
 		size = adev->gmc.visible_vram_size;
 	man->size = size >> PAGE_SHIFT;
+	adev->mman.buffer_funcs_enabled = enable;
 }
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 	if (direct_submit) {
+		WARN_ON(!ring->ready);
 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 				       NULL, fence);
 		job->fence = dma_fence_get(*fence);
@@ -1720,7 +1717,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 	struct amdgpu_job *job;
 	int r;
 
-	if (!ring->ready) {
+	if (!adev->mman.buffer_funcs_enabled) {
 		DRM_ERROR("Trying to clear memory with ring turned off.\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b8117c6e51f1..6ea7de863041 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -53,6 +53,7 @@ struct amdgpu_mman {
 	/* buffer handling */
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
+	bool					buffer_funcs_enabled;
 
 	struct mutex				gtt_window_lock;
 	/* Scheduler entity for buffer moves */
--
2.14.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
       [not found]         ` <BLUPR12MB0449AAD08D9D289E2B67D5FD84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-03-01 10:49           ` Christian König
       [not found]             ` <e2570620-cca3-40c9-3e76-dfc44f49bb60-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2018-03-01 10:49 UTC (permalink / raw)
  To: Liu, Monk, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 01.03.2018 um 11:33 schrieb Liu, Monk:
>> int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
>   	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>   	WARN_ON(job->ibs[0].length_dw > num_dw);
>   	if (direct_submit) {
> +		WARN_ON(!ring->ready);
>   		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>   				       NULL, fence);
>   		job->fence = dma_fence_get(*fence);
>
> [ML] in direct_submit case if ring->ready is false why we continue and only give a warning on that ? shouldn't we just abort or use scheduler way ??

When we use direct submission the scheduler is turned off. So we could 
return an error, but using the scheduler probably results in a deadlock.

Christian.

>
>
> /Monk
>
>
> -----Original Message-----
> From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com]
> Sent: 2018年3月1日 18:23
> To: amd-gfx@lists.freedesktop.org
> Cc: Liu, Monk <Monk.Liu@amd.com>
> Subject: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
>
> The ring status can change during GPU reset, but we still need to be able to schedule TTM buffer moves in the meantime.
>
> Otherwise we can ran into problems because of aborted move/fill operations during GPU resets.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++----------  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
>   2 files changed, 8 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 2aa6823ef503..53b34b3b8232 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>   	abo = ttm_to_amdgpu_bo(bo);
>   	switch (bo->mem.mem_type) {
>   	case TTM_PL_VRAM:
> -		if (adev->mman.buffer_funcs &&
> -		    adev->mman.buffer_funcs_ring &&
> -		    adev->mman.buffer_funcs_ring->ready == false) {
> +		if (!adev->mman.buffer_funcs_enabled) {
>   			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
>   		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
>   			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { @@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>   	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
>   					AMDGPU_GPU_PAGE_SIZE);
>   
> -	if (!ring->ready) {
> +	if (!adev->mman.buffer_funcs_enabled) {
>   		DRM_ERROR("Trying to move memory with ring turned off.\n");
>   		return -EINVAL;
>   	}
> @@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
>   		amdgpu_move_null(bo, new_mem);
>   		return 0;
>   	}
> -	if (adev->mman.buffer_funcs == NULL ||
> -	    adev->mman.buffer_funcs_ring == NULL ||
> -	    !adev->mman.buffer_funcs_ring->ready) {
> -		/* use memcpy */
> +
> +	if (!adev->mman.buffer_funcs_enabled)
>   		goto memcpy;
> -	}
>   
>   	if (old_mem->mem_type == TTM_PL_VRAM &&
>   	    new_mem->mem_type == TTM_PL_SYSTEM) { @@ -1549,6 +1544,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>   	else
>   		size = adev->gmc.visible_vram_size;
>   	man->size = size >> PAGE_SHIFT;
> +	adev->mman.buffer_funcs_enabled = enable;
>   }
>   
>   int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
>   	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>   	WARN_ON(job->ibs[0].length_dw > num_dw);
>   	if (direct_submit) {
> +		WARN_ON(!ring->ready);
>   		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>   				       NULL, fence);
>   		job->fence = dma_fence_get(*fence);
> @@ -1720,7 +1717,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>   	struct amdgpu_job *job;
>   	int r;
>   
> -	if (!ring->ready) {
> +	if (!adev->mman.buffer_funcs_enabled) {
>   		DRM_ERROR("Trying to clear memory with ring turned off.\n");
>   		return -EINVAL;
>   	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index b8117c6e51f1..6ea7de863041 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -53,6 +53,7 @@ struct amdgpu_mman {
>   	/* buffer handling */
>   	const struct amdgpu_buffer_funcs	*buffer_funcs;
>   	struct amdgpu_ring			*buffer_funcs_ring;
> +	bool					buffer_funcs_enabled;
>   
>   	struct mutex				gtt_window_lock;
>   	/* Scheduler entity for buffer moves */
> --
> 2.14.1
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
       [not found]             ` <e2570620-cca3-40c9-3e76-dfc44f49bb60-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-03-01 10:55               ` Liu, Monk
       [not found]                 ` <BLUPR12MB0449B23C2811C12EE51BF50E84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 8+ messages in thread
From: Liu, Monk @ 2018-03-01 10:55 UTC (permalink / raw)
  To: Koenig, Christian, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Looks aborting in such case is the safe way, otherwise the fence_wait() outside will still fail 

-----Original Message-----
From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com] 
Sent: 2018年3月1日 18:50
To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability

Am 01.03.2018 um 11:33 schrieb Liu, Monk:
>> int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ 
>> -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, 
>> uint64_t src_offset,
>   	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>   	WARN_ON(job->ibs[0].length_dw > num_dw);
>   	if (direct_submit) {
> +		WARN_ON(!ring->ready);
>   		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>   				       NULL, fence);
>   		job->fence = dma_fence_get(*fence);
>
> [ML] in direct_submit case if ring->ready is false why we continue and only give a warning on that ? shouldn't we just abort or use scheduler way ??

When we use direct submission the scheduler is turned off. So we could return an error, but using the scheduler probably results in a deadlock.

Christian.

>
>
> /Monk
>
>
> -----Original Message-----
> From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com]
> Sent: 2018年3月1日 18:23
> To: amd-gfx@lists.freedesktop.org
> Cc: Liu, Monk <Monk.Liu@amd.com>
> Subject: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs 
> availability
>
> The ring status can change during GPU reset, but we still need to be able to schedule TTM buffer moves in the meantime.
>
> Otherwise we can ran into problems because of aborted move/fill operations during GPU resets.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++----------  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
>   2 files changed, 8 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 2aa6823ef503..53b34b3b8232 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>   	abo = ttm_to_amdgpu_bo(bo);
>   	switch (bo->mem.mem_type) {
>   	case TTM_PL_VRAM:
> -		if (adev->mman.buffer_funcs &&
> -		    adev->mman.buffer_funcs_ring &&
> -		    adev->mman.buffer_funcs_ring->ready == false) {
> +		if (!adev->mman.buffer_funcs_enabled) {
>   			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
>   		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
>   			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { @@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>   	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
>   					AMDGPU_GPU_PAGE_SIZE);
>   
> -	if (!ring->ready) {
> +	if (!adev->mman.buffer_funcs_enabled) {
>   		DRM_ERROR("Trying to move memory with ring turned off.\n");
>   		return -EINVAL;
>   	}
> @@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
>   		amdgpu_move_null(bo, new_mem);
>   		return 0;
>   	}
> -	if (adev->mman.buffer_funcs == NULL ||
> -	    adev->mman.buffer_funcs_ring == NULL ||
> -	    !adev->mman.buffer_funcs_ring->ready) {
> -		/* use memcpy */
> +
> +	if (!adev->mman.buffer_funcs_enabled)
>   		goto memcpy;
> -	}
>   
>   	if (old_mem->mem_type == TTM_PL_VRAM &&
>   	    new_mem->mem_type == TTM_PL_SYSTEM) { @@ -1549,6 +1544,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>   	else
>   		size = adev->gmc.visible_vram_size;
>   	man->size = size >> PAGE_SHIFT;
> +	adev->mman.buffer_funcs_enabled = enable;
>   }
>   
>   int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
>   	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>   	WARN_ON(job->ibs[0].length_dw > num_dw);
>   	if (direct_submit) {
> +		WARN_ON(!ring->ready);
>   		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>   				       NULL, fence);
>   		job->fence = dma_fence_get(*fence); @@ -1720,7 +1717,7 @@ int 
> amdgpu_fill_buffer(struct amdgpu_bo *bo,
>   	struct amdgpu_job *job;
>   	int r;
>   
> -	if (!ring->ready) {
> +	if (!adev->mman.buffer_funcs_enabled) {
>   		DRM_ERROR("Trying to clear memory with ring turned off.\n");
>   		return -EINVAL;
>   	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index b8117c6e51f1..6ea7de863041 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -53,6 +53,7 @@ struct amdgpu_mman {
>   	/* buffer handling */
>   	const struct amdgpu_buffer_funcs	*buffer_funcs;
>   	struct amdgpu_ring			*buffer_funcs_ring;
> +	bool					buffer_funcs_enabled;
>   
>   	struct mutex				gtt_window_lock;
>   	/* Scheduler entity for buffer moves */
> --
> 2.14.1
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
       [not found]                 ` <BLUPR12MB0449B23C2811C12EE51BF50E84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-03-01 11:53                   ` Christian König
  0 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2018-03-01 11:53 UTC (permalink / raw)
  To: Liu, Monk, Koenig, Christian, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 01.03.2018 um 11:55 schrieb Liu, Monk:
> Looks aborting in such case is the safe way, otherwise the fence_wait() outside will still fail

Good point, just send a v2 of that patch which does exactly that.

Please review,
Christian.

>
> -----Original Message-----
> From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com]
> Sent: 2018年3月1日 18:50
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability
>
> Am 01.03.2018 um 11:33 schrieb Liu, Monk:
>>> int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@
>>> -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
>>> uint64_t src_offset,
>>    	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>>    	WARN_ON(job->ibs[0].length_dw > num_dw);
>>    	if (direct_submit) {
>> +		WARN_ON(!ring->ready);
>>    		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>>    				       NULL, fence);
>>    		job->fence = dma_fence_get(*fence);
>>
>> [ML] in direct_submit case if ring->ready is false why we continue and only give a warning on that ? shouldn't we just abort or use scheduler way ??
> When we use direct submission the scheduler is turned off. So we could return an error, but using the scheduler probably results in a deadlock.
>
> Christian.
>
>>
>> /Monk
>>
>>
>> -----Original Message-----
>> From: Christian König [mailto:ckoenig.leichtzumerken@gmail.com]
>> Sent: 2018年3月1日 18:23
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Liu, Monk <Monk.Liu@amd.com>
>> Subject: [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs
>> availability
>>
>> The ring status can change during GPU reset, but we still need to be able to schedule TTM buffer moves in the meantime.
>>
>> Otherwise we can run into problems because of aborted move/fill operations during GPU resets.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++----------  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
>>    2 files changed, 8 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 2aa6823ef503..53b34b3b8232 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -213,9 +213,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>>    	abo = ttm_to_amdgpu_bo(bo);
>>    	switch (bo->mem.mem_type) {
>>    	case TTM_PL_VRAM:
>> -		if (adev->mman.buffer_funcs &&
>> -		    adev->mman.buffer_funcs_ring &&
>> -		    adev->mman.buffer_funcs_ring->ready == false) {
>> +		if (!adev->mman.buffer_funcs_enabled) {
>>    			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
>>    		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
>>    			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { @@ -331,7 +329,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>>    	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
>>    					AMDGPU_GPU_PAGE_SIZE);
>>    
>> -	if (!ring->ready) {
>> +	if (!adev->mman.buffer_funcs_enabled) {
>>    		DRM_ERROR("Trying to move memory with ring turned off.\n");
>>    		return -EINVAL;
>>    	}
>> @@ -577,12 +575,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
>>    		amdgpu_move_null(bo, new_mem);
>>    		return 0;
>>    	}
>> -	if (adev->mman.buffer_funcs == NULL ||
>> -	    adev->mman.buffer_funcs_ring == NULL ||
>> -	    !adev->mman.buffer_funcs_ring->ready) {
>> -		/* use memcpy */
>> +
>> +	if (!adev->mman.buffer_funcs_enabled)
>>    		goto memcpy;
>> -	}
>>    
>>    	if (old_mem->mem_type == TTM_PL_VRAM &&
>>    	    new_mem->mem_type == TTM_PL_SYSTEM) { @@ -1549,6 +1544,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>>    	else
>>    		size = adev->gmc.visible_vram_size;
>>    	man->size = size >> PAGE_SHIFT;
>> +	adev->mman.buffer_funcs_enabled = enable;
>>    }
>>    
>>    int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1684,6 +1680,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
>>    	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
>>    	WARN_ON(job->ibs[0].length_dw > num_dw);
>>    	if (direct_submit) {
>> +		WARN_ON(!ring->ready);
>>    		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
>>    				       NULL, fence);
>>    		job->fence = dma_fence_get(*fence); @@ -1720,7 +1717,7 @@ int
>> amdgpu_fill_buffer(struct amdgpu_bo *bo,
>>    	struct amdgpu_job *job;
>>    	int r;
>>    
>> -	if (!ring->ready) {
>> +	if (!adev->mman.buffer_funcs_enabled) {
>>    		DRM_ERROR("Trying to clear memory with ring turned off.\n");
>>    		return -EINVAL;
>>    	}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> index b8117c6e51f1..6ea7de863041 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> @@ -53,6 +53,7 @@ struct amdgpu_mman {
>>    	/* buffer handling */
>>    	const struct amdgpu_buffer_funcs	*buffer_funcs;
>>    	struct amdgpu_ring			*buffer_funcs_ring;
>> +	bool					buffer_funcs_enabled;
>>    
>>    	struct mutex				gtt_window_lock;
>>    	/* Scheduler entity for buffer moves */
>> --
>> 2.14.1
>>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-03-01 11:53 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-01 10:22 [PATCH 1/4] drm/amdgpu: move some functions into amdgpu_ttm.h Christian König
     [not found] ` <20180301102244.1684-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-03-01 10:22   ` [PATCH 2/4] drm/amdgpu: change amdgpu_ttm_set_active_vram_size Christian König
2018-03-01 10:22   ` [PATCH 3/4] drm/amdgpu: ignore changes of buffer function status because of GPU resets Christian König
2018-03-01 10:22   ` [PATCH 4/4] drm/amdgpu: use separate status for buffer funcs availability Christian König
     [not found]     ` <20180301102244.1684-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-03-01 10:33       ` Liu, Monk
     [not found]         ` <BLUPR12MB0449AAD08D9D289E2B67D5FD84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-03-01 10:49           ` Christian König
     [not found]             ` <e2570620-cca3-40c9-3e76-dfc44f49bb60-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-03-01 10:55               ` Liu, Monk
     [not found]                 ` <BLUPR12MB0449B23C2811C12EE51BF50E84C60-7LeqcoF/hwpTIQvHjXdJlwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-03-01 11:53                   ` Christian König

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.