All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] TDR guilty job feature
@ 2017-05-08  6:51 Monk Liu
       [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Monk Liu @ 2017-05-08  6:51 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk Liu

for SRIOV gpu reset:
this feature allows the driver to judge how long a job may hang
and will kick out this job from the ring_mirror list during recovery
if the threshold is exceeded.


Monk Liu (4):
  drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
  drm/amdgpu:use job* to replace voluntary
  drm/amdgpu:only call flr_work under infinite timeout
  drm/amdgpu/SRIOV:implement guilty job TDR for

 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 46 +++++++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     | 11 ++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c       |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c       |  6 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h      |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h      |  2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c         |  2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c         | 15 +++++----
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c        |  1 +
 drivers/gpu/drm/amd/amdgpu/soc15.c            |  4 +--
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 11 ++++++-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  7 ++++
 18 files changed, 92 insertions(+), 31 deletions(-)

-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-08  6:51   ` Monk Liu
       [not found]     ` <1494226269-8837-2-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  2017-05-08  6:51   ` [PATCH 2/4] drm/amdgpu:use job* to replace voluntary Monk Liu
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 16+ messages in thread
From: Monk Liu @ 2017-05-08  6:51 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk Liu

because we don't want to do a sriov-gpu-reset under certain
cases, just split those two functions and don't invoke the
SR-IOV one from the bare-metal one.

Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
 5 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 45a60a6..4985a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	int resched;
 	bool need_full_reset;
 
-	if (amdgpu_sriov_vf(adev))
-		return amdgpu_sriov_gpu_reset(adev, true);
-
 	if (!amdgpu_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5772ef2..d7523d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
 	struct amdgpu_device *adev = dev->dev_private;
 
 	seq_printf(m, "gpu reset\n");
-	amdgpu_gpu_reset(adev);
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_gpu_reset(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 67be795..5bcbea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
 {
-	if (r == -EDEADLK) {
+	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
 		r = amdgpu_gpu_reset(adev);
 		if (!r)
 			r = -EAGAIN;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f8a6c95..49c6e6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
 						  reset_work);
 
-	amdgpu_gpu_reset(adev);
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_gpu_reset(adev);
 }
 
 /* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 690ef3d..c7718af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 		  job->base.sched->name,
 		  atomic_read(&job->ring->fence_drv.last_seq),
 		  job->ring->fence_drv.sync_seq);
-	amdgpu_gpu_reset(job->adev);
+
+	if (amdgpu_sriov_vf(job->adev))
+		amdgpu_sriov_gpu_reset(job->adev, true);
+	else
+		amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/4] drm/amdgpu:use job* to replace voluntary
       [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  2017-05-08  6:51   ` [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset Monk Liu
@ 2017-05-08  6:51   ` Monk Liu
       [not found]     ` <1494226269-8837-3-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  2017-05-08  6:51   ` [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout Monk Liu
  2017-05-08  6:51   ` [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for Monk Liu
  3 siblings, 1 reply; 16+ messages in thread
From: Monk Liu @ 2017-05-08  6:51 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk Liu

that way we can know which job caused the hang and
can do per-sched reset/recovery instead of resetting
all scheds.

Change-Id: Ifc98cd74b2d93823c489de6a89087ba188957eff
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 2 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c      | 2 +-
 5 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4985a7e..0e5f314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2529,14 +2529,13 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
  * amdgpu_sriov_gpu_reset - reset the asic
  *
  * @adev: amdgpu device pointer
- * @voluntary: if this reset is requested by guest.
- *             (true means by guest and false means by HYPERVISOR )
+ * @job: which job trigger hang
  *
  * Attempt the reset the GPU if it has hung (all asics).
  * for SRIOV case.
  * Returns 0 for success or an error on failure.
  */
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
+int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 {
 	int i, r = 0;
 	int resched;
@@ -2566,7 +2565,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
 	amdgpu_fence_driver_force_completion(adev);
 
 	/* request to take full control of GPU before re-initialization  */
-	if (voluntary)
+	if (job)
 		amdgpu_virt_reset_gpu(adev);
 	else
 		amdgpu_virt_request_full_gpu(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index c7718af..3c6fb6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -38,7 +38,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 		  job->ring->fence_drv.sync_seq);
 
 	if (amdgpu_sriov_vf(job->adev))
-		amdgpu_sriov_gpu_reset(job->adev, true);
+		amdgpu_sriov_gpu_reset(job->adev, job);
 	else
 		amdgpu_gpu_reset(job->adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 6f2b7df..9e1062e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -96,7 +96,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
+int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 96139ec..69da52d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -243,7 +243,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	}
 
 	/* Trigger recovery due to world switch failure */
-	amdgpu_sriov_gpu_reset(adev, false);
+	amdgpu_sriov_gpu_reset(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index f0d64f1..1cdf5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -514,7 +514,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
 	}
 
 	/* Trigger recovery due to world switch failure */
-	amdgpu_sriov_gpu_reset(adev, false);
+	amdgpu_sriov_gpu_reset(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout
       [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  2017-05-08  6:51   ` [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset Monk Liu
  2017-05-08  6:51   ` [PATCH 2/4] drm/amdgpu:use job* to replace voluntary Monk Liu
@ 2017-05-08  6:51   ` Monk Liu
       [not found]     ` <1494226269-8837-4-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  2017-05-08  6:51   ` [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for Monk Liu
  3 siblings, 1 reply; 16+ messages in thread
From: Monk Liu @ 2017-05-08  6:51 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk Liu

Change-Id: I541aa5109f4fcab06ece4761a09dc7e053ec6837
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 1cdf5cc..0109b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -537,12 +537,15 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
 {
 	int r;
 
-	/* see what event we get */
-	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
-
-	/* only handle FLR_NOTIFY now */
-	if (!r)
-		schedule_work(&adev->virt.flr_work);
+	/* trigger gpu-reset by hypervisor only if TDR disbaled */
+	if (msecs_to_jiffies(amdgpu_lockup_timeout) == MAX_SCHEDULE_TIMEOUT) {
+		/* see what event we get */
+		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+		/* only handle FLR_NOTIFY now */
+		if (!r)
+			schedule_work(&adev->virt.flr_work);
+	}
 
 	return 0;
 }
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for
       [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2017-05-08  6:51   ` [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout Monk Liu
@ 2017-05-08  6:51   ` Monk Liu
       [not found]     ` <1494226269-8837-5-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
  3 siblings, 1 reply; 16+ messages in thread
From: Monk Liu @ 2017-05-08  6:51 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Monk Liu

1,TDR will kick out the guilty job if it hangs longer than the
threshold given by the kernel parameter "job_hang_limit"; that
way a bad command stream will not infinitely cause GPU hangs.

by default this threshold is 1, so a job will be kicked out
after it hangs.

2,if a job times out, the TDR routine will not reset all scheds/rings;
instead it will only reset the given one, which is indicated
by @job of amdgpu_sriov_gpu_reset. That way we don't need to
reset and recover every sched/ring if we already know which job
caused the GPU hang.

3,unblock sriov_gpu_reset for AI family.

TODO:
when a job is considered guilty, we should mark a flag
in its fence status, and let the UMD side be aware that this
fence signaling is not due to job completion but to a job hang.

Change-Id: I7b89c19a3de93249db570d0a80522176b1525a09
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 36 ++++++++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |  8 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h      |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  1 +
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c        |  1 +
 drivers/gpu/drm/amd/amdgpu/soc15.c            |  4 +--
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 11 +++++++-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  7 ++++++
 12 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 90a69bf..93bcea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -111,6 +111,7 @@ extern int amdgpu_prim_buf_per_se;
 extern int amdgpu_pos_buf_per_se;
 extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
+extern int amdgpu_job_hang_limit;
 
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index b4bbbb3..23afc58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -52,6 +52,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		struct amd_sched_rq *rq;
 
 		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+
+		if (ring == &adev->gfx.kiq.ring)
+			continue;
+
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0e5f314..f3990fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2537,7 +2537,7 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
  */
 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 {
-	int i, r = 0;
+	int i, j, r = 0;
 	int resched;
 	struct amdgpu_bo *bo, *tmp;
 	struct amdgpu_ring *ring;
@@ -2550,19 +2550,30 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* block scheduler */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		ring = adev->rings[i];
+	/* we start from the ring trigger GPU hang */
+	j = job ? job->ring->idx : 0;
+
+	if (job)
+		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
+			amd_sched_job_kickout(&job->base);
 
+	/* block scheduler */
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
 		kthread_park(ring->sched.thread);
+
+		if (job && j != i)
+			continue;
+
+		/* only do job_reset on the hang ring if @job not NULL */
 		amd_sched_hw_job_reset(&ring->sched);
-	}
 
-	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-	amdgpu_fence_driver_force_completion(adev);
+		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+		amdgpu_fence_driver_force_completion_ring(ring);
+	}
 
 	/* request to take full control of GPU before re-initialization  */
 	if (job)
@@ -2615,11 +2626,16 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	}
 	fence_put(fence);
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
+		if (job && j != i) {
+			kthread_unpark(ring->sched.thread);
+			continue;
+		}
+
 		amd_sched_job_recovery(&ring->sched);
 		kthread_unpark(ring->sched.thread);
 	}
@@ -2629,6 +2645,8 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
+	} else {
+		dev_info(adev->dev, "GPU reset successed!\n");
 	}
 
 	adev->gfx.in_reset = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 416908a..fd3691a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -112,6 +112,7 @@ int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
 int amdgpu_cntl_sb_buf_per_se = 0;
 int amdgpu_param_buf_per_se = 0;
+int amdgpu_job_hang_limit = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -237,6 +238,9 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
+MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
+module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+
 
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d7523d1..be9aed1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -541,6 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 	}
 }
 
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
+{
+	if (ring)
+		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+}
+
 /*
  * Common fence implementation
  */
@@ -589,6 +595,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 	struct fence *f = container_of(rcu, struct fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
+	//printk("fence free:%p\n", f);
 }
 
 /**
@@ -601,6 +608,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  */
 static void amdgpu_fence_release(struct fence *f)
 {
+	//printk("call_rcu on fence:%p\n",f);
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 130357b..acb38a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -180,6 +180,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			amdgpu_sched_hw_submission);
 		if (r)
 			return r;
+
+		printk("ring:%p, init, idx=%d\n", ring, ring->idx);
 	}
 
 	if (ring->funcs->support_64bit_ptrs) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 981ef08..03e88c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -76,6 +76,7 @@ struct amdgpu_fence_driver {
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 				  unsigned num_hw_submission);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c3fb2f9..f6e79c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -99,6 +99,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+	printk("ring %p,ring->idx=%d\n",ring, ring->idx);
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
 				  rq, amdgpu_sched_jobs);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f84d642..6a8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1576,6 +1576,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+		printk("%s\n",__func__);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e514b2..ed3c927 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -482,8 +482,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+		//amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
+		//amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 6f4e31f..4e97e6d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -390,9 +390,18 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 					  &s_job->s_fence->cb)) {
 			fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
+			atomic_dec(&sched->hw_rq_count);
 		}
 	}
-	atomic_set(&sched->hw_rq_count, 0);
+	spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_kickout(struct amd_sched_job *s_job)
+{
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
 	spin_unlock(&sched->job_list_lock);
 }
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 8cb41d3..59694f3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -81,6 +81,7 @@ struct amd_sched_job {
 	struct list_head		node;
 	struct delayed_work		work_tdr;
 	uint64_t			id;
+	atomic_t karma;
 };
 
 extern const struct fence_ops amd_sched_fence_ops_scheduled;
@@ -96,6 +97,11 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 	return NULL;
 }
 
+static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
+{
+	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+}
+
 /**
  * Define the backend operations called by the scheduler,
  * these functions should be implemented in driver side
@@ -158,4 +164,5 @@ int amd_sched_job_init(struct amd_sched_job *job,
 		       void *owner);
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+void amd_sched_job_kickout(struct amd_sched_job *s_job);
 #endif
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* RE: [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for
       [not found]     ` <1494226269-8837-5-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-08  7:00       ` Liu, Monk
  0 siblings, 0 replies; 16+ messages in thread
From: Liu, Monk @ 2017-05-08  7:00 UTC (permalink / raw)
  To: Liu, Monk, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Sorry, drop this one; it doesn't remove the debug code.

Will send another one after cleanups.


-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Monk Liu
Sent: Monday, May 08, 2017 2:51 PM
To: amd-gfx@lists.freedesktop.org
Cc: Liu, Monk <Monk.Liu@amd.com>
Subject: [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for

1,TDR will kickout guilty job if it hang exceed the threshold of the given one from kernel paramter "job_hang_limit", that way a bad command stream will not infinitly cause GPU hang.

by default this threshold is 1 so a job will be kicked out after it hang.

2,if a job timeout TDR routine will not reset all sched/ring, instead if will only reset on the givn one which is indicated by @job of amdgpu_sriov_gpu_reset, that way we don't need to reset and recover each sched/ring if we already know which job cause GPU hang.

3,unblock sriov_gpu_reset for AI family.

TODO:
when a job is considered as guilty, we should mark some flag in its fence status flag, and let UMD side aware that this fence signaling is not due to job complete but job hang.

Change-Id: I7b89c19a3de93249db570d0a80522176b1525a09
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 36 ++++++++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |  8 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h      |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  1 +
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c        |  1 +
 drivers/gpu/drm/amd/amdgpu/soc15.c            |  4 +--
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 11 +++++++-  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  7 ++++++
 12 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 90a69bf..93bcea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -111,6 +111,7 @@ extern int amdgpu_prim_buf_per_se;  extern int amdgpu_pos_buf_per_se;  extern int amdgpu_cntl_sb_buf_per_se;  extern int amdgpu_param_buf_per_se;
+extern int amdgpu_job_hang_limit;
 
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index b4bbbb3..23afc58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -52,6 +52,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		struct amd_sched_rq *rq;
 
 		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+
+		if (ring == &adev->gfx.kiq.ring)
+			continue;
+
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0e5f314..f3990fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2537,7 +2537,7 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
  */
 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)  {
-	int i, r = 0;
+	int i, j, r = 0;
 	int resched;
 	struct amdgpu_bo *bo, *tmp;
 	struct amdgpu_ring *ring;
@@ -2550,19 +2550,30 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* block scheduler */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		ring = adev->rings[i];
+	/* we start from the ring trigger GPU hang */
+	j = job ? job->ring->idx : 0;
+
+	if (job)
+		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
+			amd_sched_job_kickout(&job->base);
 
+	/* block scheduler */
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
 		kthread_park(ring->sched.thread);
+
+		if (job && j != i)
+			continue;
+
+		/* only do job_reset on the hang ring if @job not NULL */
 		amd_sched_hw_job_reset(&ring->sched);
-	}
 
-	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-	amdgpu_fence_driver_force_completion(adev);
+		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+		amdgpu_fence_driver_force_completion_ring(ring);
+	}
 
 	/* request to take full control of GPU before re-initialization  */
 	if (job)
@@ -2615,11 +2626,16 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	}
 	fence_put(fence);
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
+		if (job && j != i) {
+			kthread_unpark(ring->sched.thread);
+			continue;
+		}
+
 		amd_sched_job_recovery(&ring->sched);
 		kthread_unpark(ring->sched.thread);
 	}
@@ -2629,6 +2645,8 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
+	} else {
+		dev_info(adev->dev, "GPU reset successed!\n");
 	}
 
 	adev->gfx.in_reset = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 416908a..fd3691a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -112,6 +112,7 @@ int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
 int amdgpu_cntl_sb_buf_per_se = 0;
 int amdgpu_param_buf_per_se = 0;
+int amdgpu_job_hang_limit = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -237,6 +238,9 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
+MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
+module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+
 
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d7523d1..be9aed1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -541,6 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 	}
 }
 
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
+{
+	if (ring)
+		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+}
+
 /*
  * Common fence implementation
  */
@@ -589,6 +595,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 	struct fence *f = container_of(rcu, struct fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
+	//printk("fence free:%p\n", f);
 }
 
 /**
@@ -601,6 +608,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  */
 static void amdgpu_fence_release(struct fence *f)
 {
+	//printk("call_rcu on fence:%p\n",f);
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 130357b..acb38a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -180,6 +180,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			amdgpu_sched_hw_submission);
 		if (r)
 			return r;
+
+		printk("ring:%p, init, idx=%d\n", ring, ring->idx);
 	}
 
 	if (ring->funcs->support_64bit_ptrs) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 981ef08..03e88c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -76,6 +76,7 @@ struct amdgpu_fence_driver {
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 				  unsigned num_hw_submission);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c3fb2f9..f6e79c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -99,6 +99,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+	printk("ring %p,ring->idx=%d\n",ring, ring->idx);
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
 				  rq, amdgpu_sched_jobs);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f84d642..6a8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1576,6 +1576,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+		printk("%s\n",__func__);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e514b2..ed3c927 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -482,8 +482,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+		//amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
+		//amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 6f4e31f..4e97e6d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -390,9 +390,18 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 					  &s_job->s_fence->cb)) {
 			fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
+			atomic_dec(&sched->hw_rq_count);
 		}
 	}
-	atomic_set(&sched->hw_rq_count, 0);
+	spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_kickout(struct amd_sched_job *s_job)
+{
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
 	spin_unlock(&sched->job_list_lock);
 }
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 8cb41d3..59694f3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -81,6 +81,7 @@ struct amd_sched_job {
 	struct list_head		node;
 	struct delayed_work		work_tdr;
 	uint64_t			id;
+	atomic_t karma;
 };
 
 extern const struct fence_ops amd_sched_fence_ops_scheduled;
@@ -96,6 +97,11 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 	return NULL;
 }
 
+static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
+{
+	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+}
+
 /**
  * Define the backend operations called by the scheduler,
  * these functions should be implemented in driver side
@@ -158,4 +164,5 @@ int amd_sched_job_init(struct amd_sched_job *job,
 		       void *owner);
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+void amd_sched_job_kickout(struct amd_sched_job *s_job);
 #endif
--
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]     ` <1494226269-8837-2-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-08  9:07       ` Christian König
       [not found]         ` <4d4fb987-9ccb-8fde-e485-7586f6ec49e8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2017-05-08  9:07 UTC (permalink / raw)
  To: Monk Liu, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 08.05.2017 um 08:51 schrieb Monk Liu:
> because we don't want to do sriov-gpu-reset under certain
> cases, so just split those two funtion and don't invoke
> sr-iov one from bare-metal one.
>
> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>   5 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 45a60a6..4985a7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>   	int resched;
>   	bool need_full_reset;
>   
> -	if (amdgpu_sriov_vf(adev))
> -		return amdgpu_sriov_gpu_reset(adev, true);
> -
>   	if (!amdgpu_check_soft_reset(adev)) {
>   		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>   		return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 5772ef2..d7523d1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>   	struct amdgpu_device *adev = dev->dev_private;
>   
>   	seq_printf(m, "gpu reset\n");
> -	amdgpu_gpu_reset(adev);
> +	if (!amdgpu_sriov_vf(adev))
> +		amdgpu_gpu_reset(adev);

Well that is clearly not a good idea. Why do you want to disable the 
reset here?

>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 67be795..5bcbea0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
>   
>   static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>   {
> -	if (r == -EDEADLK) {
> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {

Not a problem of your patch, but that stuff is outdated and should have 
been removed completely years ago. Going to take care of that.

>   		r = amdgpu_gpu_reset(adev);
>   		if (!r)
>   			r = -EAGAIN;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> index f8a6c95..49c6e6e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>   	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>   						  reset_work);
>   
> -	amdgpu_gpu_reset(adev);
> +	if (!amdgpu_sriov_vf(adev))
> +		amdgpu_gpu_reset(adev);

Mhm, that disables the reset on an invalid register access or invalid 
command stream. Is that really what we want?

Christian.

>   }
>   
>   /* Disable *all* interrupts */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 690ef3d..c7718af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>   		  job->base.sched->name,
>   		  atomic_read(&job->ring->fence_drv.last_seq),
>   		  job->ring->fence_drv.sync_seq);
> -	amdgpu_gpu_reset(job->adev);
> +
> +	if (amdgpu_sriov_vf(job->adev))
> +		amdgpu_sriov_gpu_reset(job->adev, true);
> +	else
> +		amdgpu_gpu_reset(job->adev);
>   }
>   
>   int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/4] drm/amdgpu:use job* to replace voluntary
       [not found]     ` <1494226269-8837-3-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-08  9:08       ` Christian König
  0 siblings, 0 replies; 16+ messages in thread
From: Christian König @ 2017-05-08  9:08 UTC (permalink / raw)
  To: Monk Liu, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 08.05.2017 um 08:51 schrieb Monk Liu:
> that way we can know which job cause hang and
> can do per sched reset/recovery instead of all
> sched.
>
> Change-Id: Ifc98cd74b2d93823c489de6a89087ba188957eff
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++----
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 2 +-
>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 2 +-
>   drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c      | 2 +-
>   5 files changed, 7 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 4985a7e..0e5f314 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2529,14 +2529,13 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
>    * amdgpu_sriov_gpu_reset - reset the asic
>    *
>    * @adev: amdgpu device pointer
> - * @voluntary: if this reset is requested by guest.
> - *             (true means by guest and false means by HYPERVISOR )
> + * @job: which job trigger hang
>    *
>    * Attempt the reset the GPU if it has hung (all asics).
>    * for SRIOV case.
>    * Returns 0 for success or an error on failure.
>    */
> -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
> +int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
>   {
>   	int i, r = 0;
>   	int resched;
> @@ -2566,7 +2565,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
>   	amdgpu_fence_driver_force_completion(adev);
>   
>   	/* request to take full control of GPU before re-initialization  */
> -	if (voluntary)
> +	if (job)
>   		amdgpu_virt_reset_gpu(adev);
>   	else
>   		amdgpu_virt_request_full_gpu(adev, true);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index c7718af..3c6fb6e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -38,7 +38,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>   		  job->ring->fence_drv.sync_seq);
>   
>   	if (amdgpu_sriov_vf(job->adev))
> -		amdgpu_sriov_gpu_reset(job->adev, true);
> +		amdgpu_sriov_gpu_reset(job->adev, job);
>   	else
>   		amdgpu_gpu_reset(job->adev);
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 6f2b7df..9e1062e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -96,7 +96,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
>   int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
>   int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
>   int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
> -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
> +int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
>   int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
>   void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 96139ec..69da52d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -243,7 +243,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
>   	}
>   
>   	/* Trigger recovery due to world switch failure */
> -	amdgpu_sriov_gpu_reset(adev, false);
> +	amdgpu_sriov_gpu_reset(adev, NULL);
>   }
>   
>   static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> index f0d64f1..1cdf5cc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> @@ -514,7 +514,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
>   	}
>   
>   	/* Trigger recovery due to world switch failure */
> -	amdgpu_sriov_gpu_reset(adev, false);
> +	amdgpu_sriov_gpu_reset(adev, NULL);
>   }
>   
>   static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]         ` <4d4fb987-9ccb-8fde-e485-7586f6ec49e8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-05-08  9:10           ` Liu, Monk
       [not found]             ` <DM5PR12MB16103A41862C9FCFF923295384EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Liu, Monk @ 2017-05-08  9:10 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

For SR-IOV use case, we call gpu reset under the case we have no choice ...

So many places like debugfs shouldn't be a good reason to trigger gpu reset

You know that gpu reset under SR-IOV will have very big impact on all other VFs ...

BR Monk

-----Original Message-----
From: Christian König [mailto:deathsimple@vodafone.de] 
Sent: Monday, May 08, 2017 5:08 PM
To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset

Am 08.05.2017 um 08:51 schrieb Monk Liu:
> because we don't want to do sriov-gpu-reset under certain cases, so 
> just split those two functions and don't invoke the sr-iov one from
> bare-metal one.
>
> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>   5 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 45a60a6..4985a7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>   	int resched;
>   	bool need_full_reset;
>   
> -	if (amdgpu_sriov_vf(adev))
> -		return amdgpu_sriov_gpu_reset(adev, true);
> -
>   	if (!amdgpu_check_soft_reset(adev)) {
>   		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>   		return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 5772ef2..d7523d1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>   	struct amdgpu_device *adev = dev->dev_private;
>   
>   	seq_printf(m, "gpu reset\n");
> -	amdgpu_gpu_reset(adev);
> +	if (!amdgpu_sriov_vf(adev))
> +		amdgpu_gpu_reset(adev);

Well that is clearly not a good idea. Why do you want to disable the reset here?

>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 67be795..5bcbea0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct drm_gem_object 
> *obj,
>   
>   static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>   {
> -	if (r == -EDEADLK) {
> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {

Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.

>   		r = amdgpu_gpu_reset(adev);
>   		if (!r)
>   			r = -EAGAIN;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> index f8a6c95..49c6e6e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>   	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>   						  reset_work);
>   
> -	amdgpu_gpu_reset(adev);
> +	if (!amdgpu_sriov_vf(adev))
> +		amdgpu_gpu_reset(adev);

Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?

Christian.

>   }
>   
>   /* Disable *all* interrupts */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 690ef3d..c7718af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>   		  job->base.sched->name,
>   		  atomic_read(&job->ring->fence_drv.last_seq),
>   		  job->ring->fence_drv.sync_seq);
> -	amdgpu_gpu_reset(job->adev);
> +
> +	if (amdgpu_sriov_vf(job->adev))
> +		amdgpu_sriov_gpu_reset(job->adev, true);
> +	else
> +		amdgpu_gpu_reset(job->adev);
>   }
>   
>   int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout
       [not found]     ` <1494226269-8837-4-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-08  9:11       ` Christian König
       [not found]         ` <7ae97dfa-96bd-414b-0ace-ddf4e626440d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2017-05-08  9:11 UTC (permalink / raw)
  To: Monk Liu, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 08.05.2017 um 08:51 schrieb Monk Liu:
> Change-Id: I541aa5109f4fcab06ece4761a09dc7e053ec6837
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 15 +++++++++------
>   1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> index 1cdf5cc..0109b5c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> @@ -537,12 +537,15 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
>   {
>   	int r;
>   
> -	/* see what event we get */
> -	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
> -
> -	/* only handle FLR_NOTIFY now */
> -	if (!r)
> -		schedule_work(&adev->virt.flr_work);
> +	/* trigger gpu-reset by hypervisor only if TDR disabled */
> +	if (msecs_to_jiffies(amdgpu_lockup_timeout) == MAX_SCHEDULE_TIMEOUT) {

That won't work like this, we use zero for infinite timeout here. See 
amdgpu_fence_driver_init_ring() as well.

Just changing that test to "amdgpu_lockup_timeout == 0" should work.

Christian.

> +		/* see what event we get */
> +		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
> +
> +		/* only handle FLR_NOTIFY now */
> +		if (!r)
> +			schedule_work(&adev->virt.flr_work);
> +	}
>   
>   	return 0;
>   }


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout
       [not found]         ` <7ae97dfa-96bd-414b-0ace-ddf4e626440d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-05-08  9:15           ` Liu, Monk
  0 siblings, 0 replies; 16+ messages in thread
From: Liu, Monk @ 2017-05-08  9:15 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

yeah my mistake, thanks for the catch 

-----Original Message-----
From: Christian König [mailto:deathsimple@vodafone.de] 
Sent: Monday, May 08, 2017 5:11 PM
To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout

Am 08.05.2017 um 08:51 schrieb Monk Liu:
> Change-Id: I541aa5109f4fcab06ece4761a09dc7e053ec6837
> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 15 +++++++++------
>   1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> index 1cdf5cc..0109b5c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
> @@ -537,12 +537,15 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
>   {
>   	int r;
>   
> -	/* see what event we get */
> -	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
> -
> -	/* only handle FLR_NOTIFY now */
> -	if (!r)
> -		schedule_work(&adev->virt.flr_work);
> +	/* trigger gpu-reset by hypervisor only if TDR disabled */
> +	if (msecs_to_jiffies(amdgpu_lockup_timeout) == MAX_SCHEDULE_TIMEOUT) {

That won't work like this, we use zero for infinite timeout here. See 
amdgpu_fence_driver_init_ring() as well.

Just changing that test to "amdgpu_lockup_timeout == 0" should work.

Christian.

> +		/* see what event we get */
> +		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
> +
> +		/* only handle FLR_NOTIFY now */
> +		if (!r)
> +			schedule_work(&adev->virt.flr_work);
> +	}
>   
>   	return 0;
>   }


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]             ` <DM5PR12MB16103A41862C9FCFF923295384EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2017-05-08  9:19               ` Christian König
       [not found]                 ` <707273ff-6cf7-4d86-bbe4-7cebe928840d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2017-05-08  9:19 UTC (permalink / raw)
  To: Liu, Monk, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
Mhm, good argument. But in this case we need to give at least some 
warning message instead of doing nothing.

Or even better disable creating the amdgpu_reset debugfs file 
altogether. This way nobody will wonder why using it doesn't trigger 
anything.

Christian.

Am 08.05.2017 um 11:10 schrieb Liu, Monk:
> For SR-IOV use case, we call gpu reset under the case we have no choice ...
>
> So many places like debug fs shouldn't a good reason to trigger gpu reset
>
> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>
> BR Monk
>
> -----Original Message-----
> From: Christian König [mailto:deathsimple@vodafone.de]
> Sent: Monday, May 08, 2017 5:08 PM
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
>
> Am 08.05.2017 um 08:51 schrieb Monk Liu:
>> because we don't want to do sriov-gpu-reset under certain cases, so
>> just split those two funtion and don't invoke sr-iov one from
>> bare-metal one.
>>
>> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
>> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>> ---
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>>    5 files changed, 10 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 45a60a6..4985a7e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>>    	int resched;
>>    	bool need_full_reset;
>>    
>> -	if (amdgpu_sriov_vf(adev))
>> -		return amdgpu_sriov_gpu_reset(adev, true);
>> -
>>    	if (!amdgpu_check_soft_reset(adev)) {
>>    		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>>    		return 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> index 5772ef2..d7523d1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>>    	struct amdgpu_device *adev = dev->dev_private;
>>    
>>    	seq_printf(m, "gpu reset\n");
>> -	amdgpu_gpu_reset(adev);
>> +	if (!amdgpu_sriov_vf(adev))
>> +		amdgpu_gpu_reset(adev);
> Well that is clearly not a good idea. Why do you want to disable the reset here?
>
>>    
>>    	return 0;
>>    }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> index 67be795..5bcbea0 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct drm_gem_object
>> *obj,
>>    
>>    static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>>    {
>> -	if (r == -EDEADLK) {
>> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
> Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.
>
>>    		r = amdgpu_gpu_reset(adev);
>>    		if (!r)
>>    			r = -EAGAIN;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> index f8a6c95..49c6e6e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>>    	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>    						  reset_work);
>>    
>> -	amdgpu_gpu_reset(adev);
>> +	if (!amdgpu_sriov_vf(adev))
>> +		amdgpu_gpu_reset(adev);
> Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?
>
> Christian.
>
>>    }
>>    
>>    /* Disable *all* interrupts */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 690ef3d..c7718af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>>    		  job->base.sched->name,
>>    		  atomic_read(&job->ring->fence_drv.last_seq),
>>    		  job->ring->fence_drv.sync_seq);
>> -	amdgpu_gpu_reset(job->adev);
>> +
>> +	if (amdgpu_sriov_vf(job->adev))
>> +		amdgpu_sriov_gpu_reset(job->adev, true);
>> +	else
>> +		amdgpu_gpu_reset(job->adev);
>>    }
>>    
>>    int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]                 ` <707273ff-6cf7-4d86-bbe4-7cebe928840d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-05-08  9:24                   ` Liu, Monk
       [not found]                     ` <DM5PR12MB1610A94BDEBFCAB42975560B84EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Liu, Monk @ 2017-05-08  9:24 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

I agree with disabling the amdgpu_reset debugfs entry when SRIOV is detected. 

-----Original Message-----
From: Christian König [mailto:deathsimple@vodafone.de] 
Sent: Monday, May 08, 2017 5:20 PM
To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset

> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
Mhm, good argument. But in this case we need to give at least some warning message instead of doing nothing.

Or even better, disable creating the amdgpu_reset debugfs file altogether. This way nobody will wonder why using it doesn't trigger anything.

Christian.

Am 08.05.2017 um 11:10 schrieb Liu, Monk:
> For SR-IOV use case, we call gpu reset under the case we have no choice ...
>
> So many places like debug fs shouldn't a good reason to trigger gpu 
> reset
>
> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>
> BR Monk
>
> -----Original Message-----
> From: Christian König [mailto:deathsimple@vodafone.de]
> Sent: Monday, May 08, 2017 5:08 PM
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in 
> gpu-reset
>
> Am 08.05.2017 um 08:51 schrieb Monk Liu:
>> because we don't want to do sriov-gpu-reset under certain cases, so 
>> just split those two funtion and don't invoke sr-iov one from 
>> bare-metal one.
>>
>> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
>> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>> ---
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>>    drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>>    5 files changed, 10 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 45a60a6..4985a7e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>>    	int resched;
>>    	bool need_full_reset;
>>    
>> -	if (amdgpu_sriov_vf(adev))
>> -		return amdgpu_sriov_gpu_reset(adev, true);
>> -
>>    	if (!amdgpu_check_soft_reset(adev)) {
>>    		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>>    		return 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> index 5772ef2..d7523d1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>>    	struct amdgpu_device *adev = dev->dev_private;
>>    
>>    	seq_printf(m, "gpu reset\n");
>> -	amdgpu_gpu_reset(adev);
>> +	if (!amdgpu_sriov_vf(adev))
>> +		amdgpu_gpu_reset(adev);
> Well that is clearly not a good idea. Why do you want to disable the reset here?
>
>>    
>>    	return 0;
>>    }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> index 67be795..5bcbea0 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct 
>> drm_gem_object *obj,
>>    
>>    static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>>    {
>> -	if (r == -EDEADLK) {
>> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
> Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.
>
>>    		r = amdgpu_gpu_reset(adev);
>>    		if (!r)
>>    			r = -EAGAIN;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> index f8a6c95..49c6e6e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>>    	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>    						  reset_work);
>>    
>> -	amdgpu_gpu_reset(adev);
>> +	if (!amdgpu_sriov_vf(adev))
>> +		amdgpu_gpu_reset(adev);
> Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?
>
> Christian.
>
>>    }
>>    
>>    /* Disable *all* interrupts */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 690ef3d..c7718af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>>    		  job->base.sched->name,
>>    		  atomic_read(&job->ring->fence_drv.last_seq),
>>    		  job->ring->fence_drv.sync_seq);
>> -	amdgpu_gpu_reset(job->adev);
>> +
>> +	if (amdgpu_sriov_vf(job->adev))
>> +		amdgpu_sriov_gpu_reset(job->adev, true);
>> +	else
>> +		amdgpu_gpu_reset(job->adev);
>>    }
>>    
>>    int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]                     ` <DM5PR12MB1610A94BDEBFCAB42975560B84EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2017-05-08  9:33                       ` Christian König
       [not found]                         ` <3290f7e0-56a7-98e0-db60-0ce968cd65e5-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Christian König @ 2017-05-08  9:33 UTC (permalink / raw)
  To: Liu, Monk, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Sounds good, but what do we do with the amdgpu_irq_reset_work_func?

Please note that I find that calling amdgpu_gpu_reset() here is a bad 
idea in the first place.

Instead we should consider the scheduler as faulting and let the 
scheduler handle that as in the same way as a job timeout.

But I'm not sure if those interrupts are actually sent under SRIOV or if 
the hypervisor handles them somehow.

Christian.

Am 08.05.2017 um 11:24 schrieb Liu, Monk:
> I agree with disabling debugfs for amdgpu_reset when SRIOV detected.
>
> -----Original Message-----
> From: Christian König [mailto:deathsimple@vodafone.de]
> Sent: Monday, May 08, 2017 5:20 PM
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
>
>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
> Mhm, good argument. But in this case we need to give at least some warning message instead of doing nothing.
>
> Or even better disable creating the amdgpu_reste debugfs file altogether. This way nobody will wonder why using it doesn't trigger anything.
>
> Christian.
>
> Am 08.05.2017 um 11:10 schrieb Liu, Monk:
>> For SR-IOV use case, we call gpu reset under the case we have no choice ...
>>
>> So many places like debug fs shouldn't a good reason to trigger gpu
>> reset
>>
>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>>
>> BR Monk
>>
>> -----Original Message-----
>> From: Christian König [mailto:deathsimple@vodafone.de]
>> Sent: Monday, May 08, 2017 5:08 PM
>> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in
>> gpu-reset
>>
>> Am 08.05.2017 um 08:51 schrieb Monk Liu:
>>> because we don't want to do sriov-gpu-reset under certain cases, so
>>> just split those two funtion and don't invoke sr-iov one from
>>> bare-metal one.
>>>
>>> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
>>> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>>> ---
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>>>     5 files changed, 10 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 45a60a6..4985a7e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>>>     	int resched;
>>>     	bool need_full_reset;
>>>     
>>> -	if (amdgpu_sriov_vf(adev))
>>> -		return amdgpu_sriov_gpu_reset(adev, true);
>>> -
>>>     	if (!amdgpu_check_soft_reset(adev)) {
>>>     		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>>>     		return 0;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> index 5772ef2..d7523d1 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>>>     	struct amdgpu_device *adev = dev->dev_private;
>>>     
>>>     	seq_printf(m, "gpu reset\n");
>>> -	amdgpu_gpu_reset(adev);
>>> +	if (!amdgpu_sriov_vf(adev))
>>> +		amdgpu_gpu_reset(adev);
>> Well that is clearly not a good idea. Why do you want to disable the reset here?
>>
>>>     
>>>     	return 0;
>>>     }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> index 67be795..5bcbea0 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct
>>> drm_gem_object *obj,
>>>     
>>>     static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>>>     {
>>> -	if (r == -EDEADLK) {
>>> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
>> Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.
>>
>>>     		r = amdgpu_gpu_reset(adev);
>>>     		if (!r)
>>>     			r = -EAGAIN;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> index f8a6c95..49c6e6e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>>>     	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>>     						  reset_work);
>>>     
>>> -	amdgpu_gpu_reset(adev);
>>> +	if (!amdgpu_sriov_vf(adev))
>>> +		amdgpu_gpu_reset(adev);
>> Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?
>>
>> Christian.
>>
>>>     }
>>>     
>>>     /* Disable *all* interrupts */
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> index 690ef3d..c7718af 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>>>     		  job->base.sched->name,
>>>     		  atomic_read(&job->ring->fence_drv.last_seq),
>>>     		  job->ring->fence_drv.sync_seq);
>>> -	amdgpu_gpu_reset(job->adev);
>>> +
>>> +	if (amdgpu_sriov_vf(job->adev))
>>> +		amdgpu_sriov_gpu_reset(job->adev, true);
>>> +	else
>>> +		amdgpu_gpu_reset(job->adev);
>>>     }
>>>     
>>>     int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]                         ` <3290f7e0-56a7-98e0-db60-0ce968cd65e5-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-05-08  9:42                           ` Liu, Monk
       [not found]                             ` <DM5PR12MB1610851C6F0BDD8FAC54DE1284EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Liu, Monk @ 2017-05-08  9:42 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

The VM fault or illegal instruction interrupt will be delivered to the GPU no matter whether it's the SR-IOV or bare-metal case,
and I removed them from invoking GPU reset for the same reason:
don't trigger a gpu reset in the SR-IOV case if possible; always be aware that triggering a GPU reset under SR-IOV has a heavy cost (it needs to take full access mode on the GPU, so all
other VFs will be paused for a while).

Because we can always rely on TDR and the HYPERVISOR to detect a GPU hang and resubmit malicious jobs or even kick them out later, 
and the gpu reset will eventually be invoked, there is no reason to manually and voluntarily call gpu reset in the SRIOV case.

BR Monk


-----Original Message-----
From: Christian König [mailto:deathsimple@vodafone.de] 
Sent: Monday, May 08, 2017 5:34 PM
To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset

Sounds good, but what do we do with the amdgpu_irq_reset_work_func?

Please note that I find that calling amdgpu_gpu_reset() here is a bad idea in the first place.

Instead we should consider the scheduler as faulting and let the scheduler handle that as in the same way as a job timeout.

But I'm not sure if those interrupts are actually sent under SRIOV or if the hypervisor handles them somehow.

Christian.

Am 08.05.2017 um 11:24 schrieb Liu, Monk:
> I agree with disabling debugfs for amdgpu_reset when SRIOV detected.
>
> -----Original Message-----
> From: Christian König [mailto:deathsimple@vodafone.de]
> Sent: Monday, May 08, 2017 5:20 PM
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in 
> gpu-reset
>
>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
> Mhm, good argument. But in this case we need to give at least some warning message instead of doing nothing.
>
> Or even better disable creating the amdgpu_reste debugfs file altogether. This way nobody will wonder why using it doesn't trigger anything.
>
> Christian.
>
> Am 08.05.2017 um 11:10 schrieb Liu, Monk:
>> For SR-IOV use case, we call gpu reset under the case we have no choice ...
>>
>> So many places like debug fs shouldn't a good reason to trigger gpu 
>> reset
>>
>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>>
>> BR Monk
>>
>> -----Original Message-----
>> From: Christian König [mailto:deathsimple@vodafone.de]
>> Sent: Monday, May 08, 2017 5:08 PM
>> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in 
>> gpu-reset
>>
>> Am 08.05.2017 um 08:51 schrieb Monk Liu:
>>> because we don't want to do sriov-gpu-reset under certain cases, so 
>>> just split those two funtion and don't invoke sr-iov one from 
>>> bare-metal one.
>>>
>>> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
>>> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>>> ---
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>>>     drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>>>     5 files changed, 10 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 45a60a6..4985a7e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>>>     	int resched;
>>>     	bool need_full_reset;
>>>     
>>> -	if (amdgpu_sriov_vf(adev))
>>> -		return amdgpu_sriov_gpu_reset(adev, true);
>>> -
>>>     	if (!amdgpu_check_soft_reset(adev)) {
>>>     		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>>>     		return 0;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> index 5772ef2..d7523d1 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>>>     	struct amdgpu_device *adev = dev->dev_private;
>>>     
>>>     	seq_printf(m, "gpu reset\n");
>>> -	amdgpu_gpu_reset(adev);
>>> +	if (!amdgpu_sriov_vf(adev))
>>> +		amdgpu_gpu_reset(adev);
>> Well that is clearly not a good idea. Why do you want to disable the reset here?
>>
>>>     
>>>     	return 0;
>>>     }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> index 67be795..5bcbea0 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct 
>>> drm_gem_object *obj,
>>>     
>>>     static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>>>     {
>>> -	if (r == -EDEADLK) {
>>> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
>> Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.
>>
>>>     		r = amdgpu_gpu_reset(adev);
>>>     		if (!r)
>>>     			r = -EAGAIN;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> index f8a6c95..49c6e6e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>>>     	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>>     						  reset_work);
>>>     
>>> -	amdgpu_gpu_reset(adev);
>>> +	if (!amdgpu_sriov_vf(adev))
>>> +		amdgpu_gpu_reset(adev);
>> Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?
>>
>> Christian.
>>
>>>     }
>>>     
>>>     /* Disable *all* interrupts */
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> index 690ef3d..c7718af 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>>>     		  job->base.sched->name,
>>>     		  atomic_read(&job->ring->fence_drv.last_seq),
>>>     		  job->ring->fence_drv.sync_seq);
>>> -	amdgpu_gpu_reset(job->adev);
>>> +
>>> +	if (amdgpu_sriov_vf(job->adev))
>>> +		amdgpu_sriov_gpu_reset(job->adev, true);
>>> +	else
>>> +		amdgpu_gpu_reset(job->adev);
>>>     }
>>>     
>>>     int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned 
>>> num_ibs,
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
       [not found]                             ` <DM5PR12MB1610851C6F0BDD8FAC54DE1284EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2017-05-08  9:50                               ` Christian König
  0 siblings, 0 replies; 16+ messages in thread
From: Christian König @ 2017-05-08  9:50 UTC (permalink / raw)
  To: Liu, Monk, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> Because we can always rely on TDR and HYPERVISOR to detect GPU hang and resubmit malicious jobs or even kick them out later,
> and the gpu reset will eventually be invoked, so there is no reason to manually and voluntarily call gpu reset under SRIOV case.
Well there is a rather good reason, we detect that something is wrong 
much faster than waiting for the timeout.

But I agree that it was broken before as well and we can fix that later. 
Please add a code comment that this needs more work.

With that fixed feel free to add my rb on it.

Christian.

Am 08.05.2017 um 11:42 schrieb Liu, Monk:
> The VM fault interrupt or illegal instruction  will be delivered to GPU no matter it's SR-IOV or bare-metal case,
> And I removed them from invoking GPU reset is due to the same reason:
> Don't trigger gpu reset for sriov case if possible, always beware that trigger GPU reset under SR-IOV is a heavy cost (need take full access mode on GPU, so all
> Other VFs will be paused for a while)
>
> Because we can always rely on TDR and HYPERVISOR to detect GPU hang and resubmit malicious jobs or even kick them out later,
> and the gpu reset will eventually be invoked, so there is no reason to manually and voluntarily call gpu reset under SRIOV case.
>
> BR Monk
>
>
> -----Original Message-----
> From: Christian König [mailto:deathsimple@vodafone.de]
> Sent: Monday, May 08, 2017 5:34 PM
> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset
>
> Sounds good, but what do we do with the amdgpu_irq_reset_work_func?
>
> Please note that I find that calling amdgpu_gpu_reset() here is a bad idea in the first place.
>
> Instead we should consider the scheduler as faulting and let the scheduler handle that as in the same way as a job timeout.
>
> But I'm not sure if those interrupts are actually send under SRIOV or if the hypervisor handles them somehow.
>
> Christian.
>
> Am 08.05.2017 um 11:24 schrieb Liu, Monk:
>> I agree with disabling debugfs for amdgpu_reset when SRIOV detected.
>>
>> -----Original Message-----
>> From: Christian König [mailto:deathsimple@vodafone.de]
>> Sent: Monday, May 08, 2017 5:20 PM
>> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in
>> gpu-reset
>>
>>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>> Mhm, good argument. But in this case we need to give at least some warning message instead of doing nothing.
>>
>> Or even better disable creating the amdgpu_reste debugfs file altogether. This way nobody will wonder why using it doesn't trigger anything.
>>
>> Christian.
>>
>> Am 08.05.2017 um 11:10 schrieb Liu, Monk:
>>> For SR-IOV use case, we call gpu reset under the case we have no choice ...
>>>
>>> So many places like debug fs shouldn't a good reason to trigger gpu
>>> reset
>>>
>>> You know that gpu reset under SR-IOV will have very big impact on all other VFs ...
>>>
>>> BR Monk
>>>
>>> -----Original Message-----
>>> From: Christian König [mailto:deathsimple@vodafone.de]
>>> Sent: Monday, May 08, 2017 5:08 PM
>>> To: Liu, Monk <Monk.Liu@amd.com>; amd-gfx@lists.freedesktop.org
>>> Subject: Re: [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in
>>> gpu-reset
>>>
>>> Am 08.05.2017 um 08:51 schrieb Monk Liu:
>>>> because we don't want to do sriov-gpu-reset under certain cases, so
>>>> just split those two funtion and don't invoke sr-iov one from
>>>> bare-metal one.
>>>>
>>>> Change-Id: I641126c241e2ee2dfd54e6d16c389b159f99cfe0
>>>> Signed-off-by: Monk Liu <Monk.Liu@amd.com>
>>>> ---
>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 3 ++-
>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 2 +-
>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 3 ++-
>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    | 6 +++++-
>>>>      5 files changed, 10 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> index 45a60a6..4985a7e 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> @@ -2652,9 +2652,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
>>>>      	int resched;
>>>>      	bool need_full_reset;
>>>>      
>>>> -	if (amdgpu_sriov_vf(adev))
>>>> -		return amdgpu_sriov_gpu_reset(adev, true);
>>>> -
>>>>      	if (!amdgpu_check_soft_reset(adev)) {
>>>>      		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
>>>>      		return 0;
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>> index 5772ef2..d7523d1 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>>>> @@ -651,7 +651,8 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
>>>>      	struct amdgpu_device *adev = dev->dev_private;
>>>>      
>>>>      	seq_printf(m, "gpu reset\n");
>>>> -	amdgpu_gpu_reset(adev);
>>>> +	if (!amdgpu_sriov_vf(adev))
>>>> +		amdgpu_gpu_reset(adev);
>>> Well that is clearly not a good idea. Why do you want to disable the reset here?
>>>
>>>>      
>>>>      	return 0;
>>>>      }
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>> index 67be795..5bcbea0 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>> @@ -221,7 +221,7 @@ void amdgpu_gem_object_close(struct
>>>> drm_gem_object *obj,
>>>>      
>>>>      static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
>>>>      {
>>>> -	if (r == -EDEADLK) {
>>>> +	if (r == -EDEADLK && !amdgpu_sriov_vf(adev)) {
>>> Not a problem of your patch, but that stuff is outdated and should have been removed completely years ago. Going to take care of that.
>>>
>>>>      		r = amdgpu_gpu_reset(adev);
>>>>      		if (!r)
>>>>      			r = -EAGAIN;
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>>> index f8a6c95..49c6e6e 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>>>> @@ -89,7 +89,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
>>>>      	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
>>>>      						  reset_work);
>>>>      
>>>> -	amdgpu_gpu_reset(adev);
>>>> +	if (!amdgpu_sriov_vf(adev))
>>>> +		amdgpu_gpu_reset(adev);
>>> Mhm, that disables the reset on an invalid register access or invalid command stream. Is that really what we want?
>>>
>>> Christian.
>>>
>>>>      }
>>>>      
>>>>      /* Disable *all* interrupts */
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> index 690ef3d..c7718af 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> @@ -36,7 +36,11 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
>>>>      		  job->base.sched->name,
>>>>      		  atomic_read(&job->ring->fence_drv.last_seq),
>>>>      		  job->ring->fence_drv.sync_seq);
>>>> -	amdgpu_gpu_reset(job->adev);
>>>> +
>>>> +	if (amdgpu_sriov_vf(job->adev))
>>>> +		amdgpu_sriov_gpu_reset(job->adev, true);
>>>> +	else
>>>> +		amdgpu_gpu_reset(job->adev);
>>>>      }
>>>>      
>>>>      int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned
>>>> num_ibs,
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2017-05-08  9:50 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-08  6:51 [PATCH 0/4] TDR guilty job feature Monk Liu
     [not found] ` <1494226269-8837-1-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
2017-05-08  6:51   ` [PATCH 1/4] drm/amdgpu:don't invoke srio-gpu-reset in gpu-reset Monk Liu
     [not found]     ` <1494226269-8837-2-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
2017-05-08  9:07       ` Christian König
     [not found]         ` <4d4fb987-9ccb-8fde-e485-7586f6ec49e8-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-05-08  9:10           ` Liu, Monk
     [not found]             ` <DM5PR12MB16103A41862C9FCFF923295384EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-05-08  9:19               ` Christian König
     [not found]                 ` <707273ff-6cf7-4d86-bbe4-7cebe928840d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-05-08  9:24                   ` Liu, Monk
     [not found]                     ` <DM5PR12MB1610A94BDEBFCAB42975560B84EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-05-08  9:33                       ` Christian König
     [not found]                         ` <3290f7e0-56a7-98e0-db60-0ce968cd65e5-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-05-08  9:42                           ` Liu, Monk
     [not found]                             ` <DM5PR12MB1610851C6F0BDD8FAC54DE1284EE0-2J9CzHegvk++jCVTvoAFKAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-05-08  9:50                               ` Christian König
2017-05-08  6:51   ` [PATCH 2/4] drm/amdgpu:use job* to replace voluntary Monk Liu
     [not found]     ` <1494226269-8837-3-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
2017-05-08  9:08       ` Christian König
2017-05-08  6:51   ` [PATCH 3/4] drm/amdgpu:only call flr_work under infinite timeout Monk Liu
     [not found]     ` <1494226269-8837-4-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
2017-05-08  9:11       ` Christian König
     [not found]         ` <7ae97dfa-96bd-414b-0ace-ddf4e626440d-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-05-08  9:15           ` Liu, Monk
2017-05-08  6:51   ` [PATCH 4/4] drm/amdgpu/SRIOV:implement guilty job TDR for Monk Liu
     [not found]     ` <1494226269-8837-5-git-send-email-Monk.Liu-5C7GfCeVMHo@public.gmane.org>
2017-05-08  7:00       ` Liu, Monk

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.