dri-devel.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling.
@ 2018-12-10 21:43 Andrey Grodzovsky
       [not found] ` <1544478238-13310-1-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Andrey Grodzovsky @ 2018-12-10 21:43 UTC (permalink / raw)
  To: dri-devel, amd-gfx, ckoenig.leichtzumerken, eric, etnaviv; +Cc: Monk.Liu

Decouple sched thread stop and start and ring mirror
list handling from the policy of what to do about the
guilty jobs.
When stopping the sched thread and detaching sched fences
from non-signaled HW fences, wait for all signaled HW fences
to complete before rerunning the jobs.

v2: Fix resubmission of guilty job into HW after refactoring.

Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  17 +++--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c    |   8 +--
 drivers/gpu/drm/scheduler/sched_main.c     | 110 ++++++++++++++++++-----------
 drivers/gpu/drm/v3d/v3d_sched.c            |  11 +--
 include/drm/gpu_scheduler.h                |  10 ++-
 5 files changed, 95 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ef36cc5..42111d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3292,17 +3292,16 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
+		bool park_only = job && job->base.sched != &ring->sched;
 
 		if (!ring || !ring->sched.thread)
 			continue;
 
-		kthread_park(ring->sched.thread);
+		drm_sched_stop(&ring->sched, job ? &job->base : NULL, park_only);
 
-		if (job && job->base.sched != &ring->sched)
+		if (park_only)
 			continue;
 
-		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
-
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
@@ -3445,6 +3444,7 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
 					  struct amdgpu_job *job)
 {
 	int i;
+	bool unpark_only;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3456,10 +3456,13 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
 		 * or all rings (in the case @job is NULL)
 		 * after above amdgpu_reset accomplished
 		 */
-		if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
-			drm_sched_job_recovery(&ring->sched);
+		unpark_only = (job && job->base.sched != &ring->sched) ||
+			       adev->asic_reset_res;
+
+		if (!unpark_only)
+			drm_sched_resubmit_jobs(&ring->sched);
 
-		kthread_unpark(ring->sched.thread);
+		drm_sched_start(&ring->sched, unpark_only);
 	}
 
 	if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 49a6763..fab3b51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -109,16 +109,16 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	}
 
 	/* block scheduler */
-	kthread_park(gpu->sched.thread);
-	drm_sched_hw_job_reset(&gpu->sched, sched_job);
+	drm_sched_stop(&gpu->sched, sched_job, false);
 
 	/* get the GPU back into the init state */
 	etnaviv_core_dump(gpu);
 	etnaviv_gpu_recover_hang(gpu);
 
+	drm_sched_resubmit_jobs(&gpu->sched);
+
 	/* restart scheduler after GPU is usable again */
-	drm_sched_job_recovery(&gpu->sched);
-	kthread_unpark(gpu->sched.thread);
+	drm_sched_start(&gpu->sched);
 }
 
 static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index dbb6906..cdf95e2 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
 
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
-
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -342,13 +340,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
  * @bad: bad scheduler job
  *
  */
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad,
+		    bool park_only)
 {
 	struct drm_sched_job *s_job;
 	struct drm_sched_entity *entity, *tmp;
 	unsigned long flags;
+	struct list_head wait_list;
 	int i;
 
+	kthread_park(sched->thread);
+	if (park_only)
+		return;
+
+	INIT_LIST_HEAD(&wait_list);
+
 	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
 		if (s_job->s_fence->parent &&
@@ -358,9 +364,24 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 			s_job->s_fence->parent = NULL;
 			atomic_dec(&sched->hw_rq_count);
 		}
+		else {
+			/* TODO Is it get/put neccessey here ? */
+			dma_fence_get(&s_job->s_fence->finished);
+			list_add(&s_job->finish_node, &wait_list);
+		}
 	}
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
+	/*
+	 * Verify all the signaled jobs in mirror list are removed from the ring
+	 * We rely on the fact that any finish_work in progress will wait for this
+	 * handler to complete before releasing all of the jobs we iterate.
+	 */
+	list_for_each_entry(s_job, &wait_list, finish_node) {
+		dma_fence_wait(&s_job->s_fence->finished, false);
+		dma_fence_put(&s_job->s_fence->finished);
+	}
+
 	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 		/* don't increase @bad's karma if it's from KERNEL RQ,
@@ -385,7 +406,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 		}
 	}
 }
-EXPORT_SYMBOL(drm_sched_hw_job_reset);
+EXPORT_SYMBOL(drm_sched_stop);
 
 /**
  * drm_sched_job_recovery - recover jobs after a reset
@@ -393,33 +414,21 @@ EXPORT_SYMBOL(drm_sched_hw_job_reset);
  * @sched: scheduler instance
  *
  */
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)
 {
 	struct drm_sched_job *s_job, *tmp;
-	bool found_guilty = false;
 	unsigned long flags;
 	int r;
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	if (unpark_only)
+		goto unpark;
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);	
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
-		struct dma_fence *fence;
-		uint64_t guilty_context;
-
-		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
-			found_guilty = true;
-			guilty_context = s_job->s_fence->scheduled.context;
-		}
-
-		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
-			dma_fence_set_error(&s_fence->finished, -ECANCELED);
-
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
-		fence = sched->ops->run_job(s_job);
-		atomic_inc(&sched->hw_rq_count);
+		struct dma_fence *fence = s_job->s_fence->parent;
 
 		if (fence) {
-			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,
 						   drm_sched_process_job);
 			if (r == -ENOENT)
@@ -427,18 +436,47 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			dma_fence_put(fence);
-		} else {
-			if (s_fence->finished.error < 0)
-				drm_sched_expel_job_unlocked(s_job);
+		} else
 			drm_sched_process_job(NULL, &s_fence->cb);
-		}
-		spin_lock_irqsave(&sched->job_list_lock, flags);
 	}
+
 	drm_sched_start_timeout(sched);
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+unpark:
+	kthread_unpark(sched->thread);
 }
-EXPORT_SYMBOL(drm_sched_job_recovery);
+EXPORT_SYMBOL(drm_sched_start);
+
+/**
+ * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ *
+ * @sched: scheduler instance
+ *
+ */
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job, *tmp;
+	uint64_t guilty_context;
+	bool found_guilty = false;
+
+	/*TODO DO we need spinlock here ? */
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *s_fence = s_job->s_fence;
+
+		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+			found_guilty = true;
+			guilty_context = s_job->s_fence->scheduled.context;
+		}
+
+		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+			dma_fence_set_error(&s_fence->finished, -ECANCELED);
+
+		s_job->s_fence->parent = sched->ops->run_job(s_job);
+		atomic_inc(&sched->hw_rq_count);
+	}
+}
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
 
 /**
  * drm_sched_job_init - init a scheduler job
@@ -634,26 +672,14 @@ static int drm_sched_main(void *param)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 			dma_fence_put(fence);
-		} else {
-			if (s_fence->finished.error < 0)
-				drm_sched_expel_job_unlocked(sched_job);
+		} else
 			drm_sched_process_job(NULL, &s_fence->cb);
-		}
 
 		wake_up(&sched->job_scheduled);
 	}
 	return 0;
 }
 
-static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
-{
-	struct drm_gpu_scheduler *sched = s_job->sched;
-
-	spin_lock(&sched->job_list_lock);
-	list_del_init(&s_job->node);
-	spin_unlock(&sched->job_list_lock);
-}
-
 /**
  * drm_sched_init - Init a gpu scheduler instance
  *
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 445b2ef..f99346a 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -178,18 +178,19 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
 		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
 
-		kthread_park(sched->thread);
-		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
-					       sched_job : NULL));
+		drm_sched_stop(sched, (sched_job->sched == sched ?
+					       sched_job : NULL), false);
 	}
 
 	/* get the GPU back into the init state */
 	v3d_reset(v3d);
 
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
+		drm_sched_resubmit_jobs(sched_job->sched);
+
 	/* Unblock schedulers and restart their jobs. */
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_job_recovery(&v3d->queue[q].sched);
-		kthread_unpark(v3d->queue[q].sched.thread);
+		drm_sched_start(&v3d->queue[q].sched, false);
 	}
 
 	mutex_unlock(&v3d->reset_lock);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 47e1979..c94b592 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -175,6 +175,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  *               finished to remove the job from the
  *               @drm_gpu_scheduler.ring_mirror_list.
  * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @finish_node: used in a list to wait on before resetting the scheduler
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
@@ -193,6 +194,7 @@ struct drm_sched_job {
 	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
+	struct list_head		finish_node;
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
@@ -298,9 +300,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       void *owner);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched,
+		    struct drm_sched_job *job,
+		    bool park_only);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found] ` <1544478238-13310-1-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-10 21:43   ` Andrey Grodzovsky
       [not found]     ` <1544478238-13310-2-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
  2018-12-17 15:27   ` [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling Christian König
  1 sibling, 1 reply; 10+ messages in thread
From: Andrey Grodzovsky @ 2018-12-10 21:43 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: david1.zhou-5C7GfCeVMHo, Monk.Liu-5C7GfCeVMHo, Andrey Grodzovsky

Expedite job deletion from the ring mirror list to the HW fence signal
callback instead of from finish_work; together with waiting for all
such fences to signal in drm_sched_stop, we guarantee that an
already signaled job will not be processed twice.
Remove the sched finish fence callback and just submit finish_work
directly from the HW fence callback.

v2: Fix comments.

v3: Attach  hw fence cb to sched_job

Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 58 ++++++++++++++++------------------
 include/drm/gpu_scheduler.h            |  6 ++--
 2 files changed, 30 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index cdf95e2..f0c1f32 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct work_struct *work)
 	cancel_delayed_work_sync(&sched->work_tdr);
 
 	spin_lock_irqsave(&sched->job_list_lock, flags);
-	/* remove job from ring_mirror_list */
-	list_del_init(&s_job->node);
 	/* queue TDR for next job */
 	drm_sched_start_timeout(sched);
 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
@@ -293,22 +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void drm_sched_job_finish_cb(struct dma_fence *f,
-				    struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
-						 finish_cb);
-	schedule_work(&job->finish_work);
-}
-
 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
 	struct drm_gpu_scheduler *sched = s_job->sched;
 	unsigned long flags;
 
-	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
-			       drm_sched_job_finish_cb);
-
 	spin_lock_irqsave(&sched->job_list_lock, flags);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
@@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad,
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
-					      &s_job->s_fence->cb)) {
+					      &s_job->cb)) {
 			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 			atomic_dec(&sched->hw_rq_count);
-		}
-		else {
+		} else {
 			/* TODO Is it get/put neccessey here ? */
 			dma_fence_get(&s_job->s_fence->finished);
 			list_add(&s_job->finish_node, &wait_list);
@@ -417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)
 {
 	struct drm_sched_job *s_job, *tmp;
-	unsigned long flags;
 	int r;
 
 	if (unpark_only)
 		goto unpark;
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);	
+	/*
+	 * Locking the list is not required here as the sched thread is parked
+	 * so no new jobs are being pushed in to HW and in drm_sched_stop we
+	 * flushed all the jobs who were still in mirror list but who already
+	 * signaled and removed them self from the list. Also concurrent
+	 * GPU recovers can't run in parallel.
+	 */
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		if (fence) {
-			r = dma_fence_add_callback(fence, &s_fence->cb,
+			r = dma_fence_add_callback(fence, &s_job->cb,
 						   drm_sched_process_job);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_fence->cb);
+				drm_sched_process_job(fence, &s_job->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 		} else
-			drm_sched_process_job(NULL, &s_fence->cb);
+			drm_sched_process_job(NULL, &s_job->cb);
 	}
 
 	drm_sched_start_timeout(sched);
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
 unpark:
 	kthread_unpark(sched->thread);
@@ -590,18 +579,27 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
  */
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-	struct drm_sched_fence *s_fence =
-		container_of(cb, struct drm_sched_fence, cb);
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+	struct drm_sched_fence *s_fence = s_job->s_fence;
 	struct drm_gpu_scheduler *sched = s_fence->sched;
+	unsigned long flags;
+
+	cancel_delayed_work(&sched->work_tdr);
 
-	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
 	atomic_dec(&sched->num_jobs);
+
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	/* remove job from ring_mirror_list */
+	list_del_init(&s_job->node);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 	drm_sched_fence_finished(s_fence);
 
 	trace_drm_sched_process_job(s_fence);
-	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
+
+	schedule_work(&s_job->finish_work);
 }
 
 /**
@@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
 
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
-			r = dma_fence_add_callback(fence, &s_fence->cb,
+			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_process_job);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_fence->cb);
+				drm_sched_process_job(fence, &sched_job->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 			dma_fence_put(fence);
 		} else
-			drm_sched_process_job(NULL, &s_fence->cb);
+			drm_sched_process_job(NULL, &sched_job->cb);
 
 		wake_up(&sched->job_scheduled);
 	}
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index c94b592..f29aa1c 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -138,10 +138,6 @@ struct drm_sched_fence {
 	struct dma_fence		finished;
 
         /**
-         * @cb: the callback for the parent fence below.
-         */
-	struct dma_fence_cb		cb;
-        /**
          * @parent: the fence returned by &drm_sched_backend_ops.run_job
          * when scheduling the job on hardware. We signal the
          * &drm_sched_fence.finished fence once parent is signalled.
@@ -182,6 +178,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  *         be scheduled further.
  * @s_priority: the priority of the job.
  * @entity: the entity to which this job belongs.
+ * @cb: the callback for the parent fence in s_fence.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -199,6 +196,7 @@ struct drm_sched_job {
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
 	struct drm_sched_entity  *entity;
+	struct dma_fence_cb		cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* RE: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found]     ` <1544478238-13310-2-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-11  2:45       ` Zhou, David(ChunMing)
       [not found]         ` <SN1PR12MB05109A9D297261FC186B2627B4A60-z7L1TMIYDg5tVDmkcP8tDwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Zhou, David(ChunMing) @ 2018-12-11  2:45 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Grodzovsky, Andrey, Liu, Monk

I don't think adding a cb to the sched job would work, since its lifetime differs from that of the fence.
Unless you make the sched job reference-counted, we will get into trouble sooner or later.

-David

> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
> Andrey Grodzovsky
> Sent: Tuesday, December 11, 2018 5:44 AM
> To: dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org;
> ckoenig.leichtzumerken@gmail.com; eric@anholt.net;
> etnaviv@lists.freedesktop.org
> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; Liu, Monk
> <Monk.Liu@amd.com>; Grodzovsky, Andrey
> <Andrey.Grodzovsky@amd.com>
> Subject: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
> 
> Expedite job deletion from ring mirror list to the HW fence signal callback
> instead from finish_work, together with waiting for all such fences to signal in
> drm_sched_stop we garantee that already signaled job will not be processed
> twice.
> Remove the sched finish fence callback and just submit finish_work directly
> from the HW fence callback.
> 
> v2: Fix comments.
> 
> v3: Attach  hw fence cb to sched_job
> 
> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 58 ++++++++++++++++----------
> --------
>  include/drm/gpu_scheduler.h            |  6 ++--
>  2 files changed, 30 insertions(+), 34 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
> b/drivers/gpu/drm/scheduler/sched_main.c
> index cdf95e2..f0c1f32 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct work_struct
> *work)
>  	cancel_delayed_work_sync(&sched->work_tdr);
> 
>  	spin_lock_irqsave(&sched->job_list_lock, flags);
> -	/* remove job from ring_mirror_list */
> -	list_del_init(&s_job->node);
>  	/* queue TDR for next job */
>  	drm_sched_start_timeout(sched);
>  	spin_unlock_irqrestore(&sched->job_list_lock, flags); @@ -293,22
> +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
>  	sched->ops->free_job(s_job);
>  }
> 
> -static void drm_sched_job_finish_cb(struct dma_fence *f,
> -				    struct dma_fence_cb *cb)
> -{
> -	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
> -						 finish_cb);
> -	schedule_work(&job->finish_work);
> -}
> -
>  static void drm_sched_job_begin(struct drm_sched_job *s_job)  {
>  	struct drm_gpu_scheduler *sched = s_job->sched;
>  	unsigned long flags;
> 
> -	dma_fence_add_callback(&s_job->s_fence->finished, &s_job-
> >finish_cb,
> -			       drm_sched_job_finish_cb);
> -
>  	spin_lock_irqsave(&sched->job_list_lock, flags);
>  	list_add_tail(&s_job->node, &sched->ring_mirror_list);
>  	drm_sched_start_timeout(sched);
> @@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler
> *sched, struct drm_sched_job *bad,
>  	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node)
> {
>  		if (s_job->s_fence->parent &&
>  		    dma_fence_remove_callback(s_job->s_fence->parent,
> -					      &s_job->s_fence->cb)) {
> +					      &s_job->cb)) {
>  			dma_fence_put(s_job->s_fence->parent);
>  			s_job->s_fence->parent = NULL;
>  			atomic_dec(&sched->hw_rq_count);
> -		}
> -		else {
> +		} else {
>  			/* TODO Is it get/put neccessey here ? */
>  			dma_fence_get(&s_job->s_fence->finished);
>  			list_add(&s_job->finish_node, &wait_list); @@ -
> 417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);  void
> drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)  {
>  	struct drm_sched_job *s_job, *tmp;
> -	unsigned long flags;
>  	int r;
> 
>  	if (unpark_only)
>  		goto unpark;
> 
> -	spin_lock_irqsave(&sched->job_list_lock, flags);
> +	/*
> +	 * Locking the list is not required here as the sched thread is parked
> +	 * so no new jobs are being pushed in to HW and in drm_sched_stop
> we
> +	 * flushed all the jobs who were still in mirror list but who already
> +	 * signaled and removed them self from the list. Also concurrent
> +	 * GPU recovers can't run in parallel.
> +	 */
>  	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
> node) {
> -		struct drm_sched_fence *s_fence = s_job->s_fence;
>  		struct dma_fence *fence = s_job->s_fence->parent;
> 
>  		if (fence) {
> -			r = dma_fence_add_callback(fence, &s_fence->cb,
> +			r = dma_fence_add_callback(fence, &s_job->cb,
>  						   drm_sched_process_job);
>  			if (r == -ENOENT)
> -				drm_sched_process_job(fence, &s_fence-
> >cb);
> +				drm_sched_process_job(fence, &s_job->cb);
>  			else if (r)
>  				DRM_ERROR("fence add callback failed
> (%d)\n",
>  					  r);
>  		} else
> -			drm_sched_process_job(NULL, &s_fence->cb);
> +			drm_sched_process_job(NULL, &s_job->cb);
>  	}
> 
>  	drm_sched_start_timeout(sched);
> -	spin_unlock_irqrestore(&sched->job_list_lock, flags);
> 
>  unpark:
>  	kthread_unpark(sched->thread);
> @@ -590,18 +579,27 @@ drm_sched_select_entity(struct
> drm_gpu_scheduler *sched)
>   */
>  static void drm_sched_process_job(struct dma_fence *f, struct
> dma_fence_cb *cb)  {
> -	struct drm_sched_fence *s_fence =
> -		container_of(cb, struct drm_sched_fence, cb);
> +	struct drm_sched_job *s_job = container_of(cb, struct
> drm_sched_job, cb);
> +	struct drm_sched_fence *s_fence = s_job->s_fence;
>  	struct drm_gpu_scheduler *sched = s_fence->sched;
> +	unsigned long flags;
> +
> +	cancel_delayed_work(&sched->work_tdr);
> 
> -	dma_fence_get(&s_fence->finished);
>  	atomic_dec(&sched->hw_rq_count);
>  	atomic_dec(&sched->num_jobs);
> +
> +	spin_lock_irqsave(&sched->job_list_lock, flags);
> +	/* remove job from ring_mirror_list */
> +	list_del_init(&s_job->node);
> +	spin_unlock_irqrestore(&sched->job_list_lock, flags);
> +
>  	drm_sched_fence_finished(s_fence);
> 
>  	trace_drm_sched_process_job(s_fence);
> -	dma_fence_put(&s_fence->finished);
>  	wake_up_interruptible(&sched->wake_up_worker);
> +
> +	schedule_work(&s_job->finish_work);
>  }
> 
>  /**
> @@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
> 
>  		if (fence) {
>  			s_fence->parent = dma_fence_get(fence);
> -			r = dma_fence_add_callback(fence, &s_fence->cb,
> +			r = dma_fence_add_callback(fence, &sched_job->cb,
>  						   drm_sched_process_job);
>  			if (r == -ENOENT)
> -				drm_sched_process_job(fence, &s_fence-
> >cb);
> +				drm_sched_process_job(fence, &sched_job-
> >cb);
>  			else if (r)
>  				DRM_ERROR("fence add callback failed
> (%d)\n",
>  					  r);
>  			dma_fence_put(fence);
>  		} else
> -			drm_sched_process_job(NULL, &s_fence->cb);
> +			drm_sched_process_job(NULL, &sched_job->cb);
> 
>  		wake_up(&sched->job_scheduled);
>  	}
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index c94b592..f29aa1c 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -138,10 +138,6 @@ struct drm_sched_fence {
>  	struct dma_fence		finished;
> 
>          /**
> -         * @cb: the callback for the parent fence below.
> -         */
> -	struct dma_fence_cb		cb;
> -        /**
>           * @parent: the fence returned by &drm_sched_backend_ops.run_job
>           * when scheduling the job on hardware. We signal the
>           * &drm_sched_fence.finished fence once parent is signalled.
> @@ -182,6 +178,7 @@ struct drm_sched_fence
> *to_drm_sched_fence(struct dma_fence *f);
>   *         be scheduled further.
>   * @s_priority: the priority of the job.
>   * @entity: the entity to which this job belongs.
> + * @cb: the callback for the parent fence in s_fence.
>   *
>   * A job is created by the driver using drm_sched_job_init(), and
>   * should call drm_sched_entity_push_job() once it wants the scheduler
> @@ -199,6 +196,7 @@ struct drm_sched_job {
>  	atomic_t			karma;
>  	enum drm_sched_priority		s_priority;
>  	struct drm_sched_entity  *entity;
> +	struct dma_fence_cb		cb;
>  };
> 
>  static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
> --
> 2.7.4
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found]         ` <SN1PR12MB05109A9D297261FC186B2627B4A60-z7L1TMIYDg5tVDmkcP8tDwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-12-11 16:01           ` Grodzovsky, Andrey
       [not found]             ` <28c3bebc-9661-198c-7ea9-e59260d658c9-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Grodzovsky, Andrey @ 2018-12-11 16:01 UTC (permalink / raw)
  To: Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Liu, Monk

As I understand it, you say that by the time the fence callback runs the job
might have already been released,

but how so if the job gets released from drm_sched_job_finish work 
handler in the normal flow - so, after the HW

fence (s_fence->parent) cb is executed. Other 2 flows are error use 
cases where amdgpu_job_free is called directly in which

cases I assume the job wasn't submitted to HW. Last flow I see is 
drm_sched_entity_kill_jobs_cb and here I actually see a problem

with the code as it's today - drm_sched_fence_finished is called which 
will trigger s_fence->finished callback to run which today

schedules drm_sched_job_finish which releases the job, but we don't even 
wait for that and call free_job cb directly from

after that which seems wrong to me.

Andrey


On 12/10/2018 09:45 PM, Zhou, David(ChunMing) wrote:
> I don't think adding cb to sched job would work as soon as their lifetime is different with fence.
> Unless you make the sched job reference, otherwise we will get trouble sooner or later.
>
> -David
>
>> -----Original Message-----
>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>> Andrey Grodzovsky
>> Sent: Tuesday, December 11, 2018 5:44 AM
>> To: dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org;
>> ckoenig.leichtzumerken@gmail.com; eric@anholt.net;
>> etnaviv@lists.freedesktop.org
>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; Liu, Monk
>> <Monk.Liu@amd.com>; Grodzovsky, Andrey
>> <Andrey.Grodzovsky@amd.com>
>> Subject: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
>>
>> Expedite job deletion from ring mirror list to the HW fence signal callback
>> instead from finish_work, together with waiting for all such fences to signal in
>> drm_sched_stop we guarantee that an already signaled job will not be processed
>> twice.
>> Remove the sched finish fence callback and just submit finish_work directly
>> from the HW fence callback.
>>
>> v2: Fix comments.
>>
>> v3: Attach  hw fence cb to sched_job
>>
>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>> ---
>>   drivers/gpu/drm/scheduler/sched_main.c | 58 ++++++++++++++++----------
>> --------
>>   include/drm/gpu_scheduler.h            |  6 ++--
>>   2 files changed, 30 insertions(+), 34 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
>> b/drivers/gpu/drm/scheduler/sched_main.c
>> index cdf95e2..f0c1f32 100644
>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>> @@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct work_struct
>> *work)
>>   	cancel_delayed_work_sync(&sched->work_tdr);
>>
>>   	spin_lock_irqsave(&sched->job_list_lock, flags);
>> -	/* remove job from ring_mirror_list */
>> -	list_del_init(&s_job->node);
>>   	/* queue TDR for next job */
>>   	drm_sched_start_timeout(sched);
>>   	spin_unlock_irqrestore(&sched->job_list_lock, flags); @@ -293,22
>> +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
>>   	sched->ops->free_job(s_job);
>>   }
>>
>> -static void drm_sched_job_finish_cb(struct dma_fence *f,
>> -				    struct dma_fence_cb *cb)
>> -{
>> -	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
>> -						 finish_cb);
>> -	schedule_work(&job->finish_work);
>> -}
>> -
>>   static void drm_sched_job_begin(struct drm_sched_job *s_job)  {
>>   	struct drm_gpu_scheduler *sched = s_job->sched;
>>   	unsigned long flags;
>>
>> -	dma_fence_add_callback(&s_job->s_fence->finished, &s_job-
>>> finish_cb,
>> -			       drm_sched_job_finish_cb);
>> -
>>   	spin_lock_irqsave(&sched->job_list_lock, flags);
>>   	list_add_tail(&s_job->node, &sched->ring_mirror_list);
>>   	drm_sched_start_timeout(sched);
>> @@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler
>> *sched, struct drm_sched_job *bad,
>>   	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node)
>> {
>>   		if (s_job->s_fence->parent &&
>>   		    dma_fence_remove_callback(s_job->s_fence->parent,
>> -					      &s_job->s_fence->cb)) {
>> +					      &s_job->cb)) {
>>   			dma_fence_put(s_job->s_fence->parent);
>>   			s_job->s_fence->parent = NULL;
>>   			atomic_dec(&sched->hw_rq_count);
>> -		}
>> -		else {
>> +		} else {
>>   			/* TODO Is it get/put neccessey here ? */
>>   			dma_fence_get(&s_job->s_fence->finished);
>>   			list_add(&s_job->finish_node, &wait_list); @@ -
>> 417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);  void
>> drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)  {
>>   	struct drm_sched_job *s_job, *tmp;
>> -	unsigned long flags;
>>   	int r;
>>
>>   	if (unpark_only)
>>   		goto unpark;
>>
>> -	spin_lock_irqsave(&sched->job_list_lock, flags);
>> +	/*
>> +	 * Locking the list is not required here as the sched thread is parked
>> +	 * so no new jobs are being pushed in to HW and in drm_sched_stop
>> we
>> +	 * flushed all the jobs who were still in mirror list but who already
>> +	 * signaled and removed themselves from the list. Also concurrent
>> +	 * GPU recovers can't run in parallel.
>> +	 */
>>   	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>> node) {
>> -		struct drm_sched_fence *s_fence = s_job->s_fence;
>>   		struct dma_fence *fence = s_job->s_fence->parent;
>>
>>   		if (fence) {
>> -			r = dma_fence_add_callback(fence, &s_fence->cb,
>> +			r = dma_fence_add_callback(fence, &s_job->cb,
>>   						   drm_sched_process_job);
>>   			if (r == -ENOENT)
>> -				drm_sched_process_job(fence, &s_fence-
>>> cb);
>> +				drm_sched_process_job(fence, &s_job->cb);
>>   			else if (r)
>>   				DRM_ERROR("fence add callback failed
>> (%d)\n",
>>   					  r);
>>   		} else
>> -			drm_sched_process_job(NULL, &s_fence->cb);
>> +			drm_sched_process_job(NULL, &s_job->cb);
>>   	}
>>
>>   	drm_sched_start_timeout(sched);
>> -	spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>
>>   unpark:
>>   	kthread_unpark(sched->thread);
>> @@ -590,18 +579,27 @@ drm_sched_select_entity(struct
>> drm_gpu_scheduler *sched)
>>    */
>>   static void drm_sched_process_job(struct dma_fence *f, struct
>> dma_fence_cb *cb)  {
>> -	struct drm_sched_fence *s_fence =
>> -		container_of(cb, struct drm_sched_fence, cb);
>> +	struct drm_sched_job *s_job = container_of(cb, struct
>> drm_sched_job, cb);
>> +	struct drm_sched_fence *s_fence = s_job->s_fence;
>>   	struct drm_gpu_scheduler *sched = s_fence->sched;
>> +	unsigned long flags;
>> +
>> +	cancel_delayed_work(&sched->work_tdr);
>>
>> -	dma_fence_get(&s_fence->finished);
>>   	atomic_dec(&sched->hw_rq_count);
>>   	atomic_dec(&sched->num_jobs);
>> +
>> +	spin_lock_irqsave(&sched->job_list_lock, flags);
>> +	/* remove job from ring_mirror_list */
>> +	list_del_init(&s_job->node);
>> +	spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> +
>>   	drm_sched_fence_finished(s_fence);
>>
>>   	trace_drm_sched_process_job(s_fence);
>> -	dma_fence_put(&s_fence->finished);
>>   	wake_up_interruptible(&sched->wake_up_worker);
>> +
>> +	schedule_work(&s_job->finish_work);
>>   }
>>
>>   /**
>> @@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
>>
>>   		if (fence) {
>>   			s_fence->parent = dma_fence_get(fence);
>> -			r = dma_fence_add_callback(fence, &s_fence->cb,
>> +			r = dma_fence_add_callback(fence, &sched_job->cb,
>>   						   drm_sched_process_job);
>>   			if (r == -ENOENT)
>> -				drm_sched_process_job(fence, &s_fence-
>>> cb);
>> +				drm_sched_process_job(fence, &sched_job-
>>> cb);
>>   			else if (r)
>>   				DRM_ERROR("fence add callback failed
>> (%d)\n",
>>   					  r);
>>   			dma_fence_put(fence);
>>   		} else
>> -			drm_sched_process_job(NULL, &s_fence->cb);
>> +			drm_sched_process_job(NULL, &sched_job->cb);
>>
>>   		wake_up(&sched->job_scheduled);
>>   	}
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index c94b592..f29aa1c 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -138,10 +138,6 @@ struct drm_sched_fence {
>>   	struct dma_fence		finished;
>>
>>           /**
>> -         * @cb: the callback for the parent fence below.
>> -         */
>> -	struct dma_fence_cb		cb;
>> -        /**
>>            * @parent: the fence returned by &drm_sched_backend_ops.run_job
>>            * when scheduling the job on hardware. We signal the
>>            * &drm_sched_fence.finished fence once parent is signalled.
>> @@ -182,6 +178,7 @@ struct drm_sched_fence
>> *to_drm_sched_fence(struct dma_fence *f);
>>    *         be scheduled further.
>>    * @s_priority: the priority of the job.
>>    * @entity: the entity to which this job belongs.
>> + * @cb: the callback for the parent fence in s_fence.
>>    *
>>    * A job is created by the driver using drm_sched_job_init(), and
>>    * should call drm_sched_entity_push_job() once it wants the scheduler
>> @@ -199,6 +196,7 @@ struct drm_sched_job {
>>   	atomic_t			karma;
>>   	enum drm_sched_priority		s_priority;
>>   	struct drm_sched_entity  *entity;
>> +	struct dma_fence_cb		cb;
>>   };
>>
>>   static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
>> --
>> 2.7.4
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found]             ` <28c3bebc-9661-198c-7ea9-e59260d658c9-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-11 16:20               ` Christian König
       [not found]                 ` <9a645f9c-93d1-12cc-044b-4bc1ea84d7d9-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2018-12-11 16:20 UTC (permalink / raw)
  To: Grodzovsky, Andrey, Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Liu, Monk

Yeah, completely correct explained.

I was unfortunately really busy today, but going to give that a look as 
soon as I have time.

Christian.

Am 11.12.18 um 17:01 schrieb Grodzovsky, Andrey:
> A I understand you say that by the time the fence callback runs the job
> might have already been released,
>
> but how so if the job gets released from drm_sched_job_finish work
> handler in the normal flow - so, after the HW
>
> fence (s_fence->parent) cb is executed. Other 2 flows are error use
> cases where amdgpu_job_free is called directly in which
>
> cases I assume the job wasn't submitted to HW. Last flow I see is
> drm_sched_entity_kill_jobs_cb and here I actually see a problem
>
> with the code as it's today - drm_sched_fence_finished is called which
> will trigger s_fence->finished callback to run which today
>
> schedules drm_sched_job_finish which releases the job, but we don't even
> wait for that and call free_job cb directly from
>
> after that which seems wrong to me.
>
> Andrey
>
>
> On 12/10/2018 09:45 PM, Zhou, David(ChunMing) wrote:
>> I don't think adding cb to sched job would work as soon as their lifetime is different with fence.
>> Unless you make the sched job reference, otherwise we will get trouble sooner or later.
>>
>> -David
>>
>>> -----Original Message-----
>>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>>> Andrey Grodzovsky
>>> Sent: Tuesday, December 11, 2018 5:44 AM
>>> To: dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org;
>>> ckoenig.leichtzumerken@gmail.com; eric@anholt.net;
>>> etnaviv@lists.freedesktop.org
>>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; Liu, Monk
>>> <Monk.Liu@amd.com>; Grodzovsky, Andrey
>>> <Andrey.Grodzovsky@amd.com>
>>> Subject: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
>>>
>>> Expedite job deletion from ring mirror list to the HW fence signal callback
>>> instead from finish_work, together with waiting for all such fences to signal in
>>> drm_sched_stop we garantee that already signaled job will not be processed
>>> twice.
>>> Remove the sched finish fence callback and just submit finish_work directly
>>> from the HW fence callback.
>>>
>>> v2: Fix comments.
>>>
>>> v3: Attach  hw fence cb to sched_job
>>>
>>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>> ---
>>>    drivers/gpu/drm/scheduler/sched_main.c | 58 ++++++++++++++++----------
>>> --------
>>>    include/drm/gpu_scheduler.h            |  6 ++--
>>>    2 files changed, 30 insertions(+), 34 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
>>> b/drivers/gpu/drm/scheduler/sched_main.c
>>> index cdf95e2..f0c1f32 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct work_struct
>>> *work)
>>>    	cancel_delayed_work_sync(&sched->work_tdr);
>>>
>>>    	spin_lock_irqsave(&sched->job_list_lock, flags);
>>> -	/* remove job from ring_mirror_list */
>>> -	list_del_init(&s_job->node);
>>>    	/* queue TDR for next job */
>>>    	drm_sched_start_timeout(sched);
>>>    	spin_unlock_irqrestore(&sched->job_list_lock, flags); @@ -293,22
>>> +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
>>>    	sched->ops->free_job(s_job);
>>>    }
>>>
>>> -static void drm_sched_job_finish_cb(struct dma_fence *f,
>>> -				    struct dma_fence_cb *cb)
>>> -{
>>> -	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
>>> -						 finish_cb);
>>> -	schedule_work(&job->finish_work);
>>> -}
>>> -
>>>    static void drm_sched_job_begin(struct drm_sched_job *s_job)  {
>>>    	struct drm_gpu_scheduler *sched = s_job->sched;
>>>    	unsigned long flags;
>>>
>>> -	dma_fence_add_callback(&s_job->s_fence->finished, &s_job-
>>>> finish_cb,
>>> -			       drm_sched_job_finish_cb);
>>> -
>>>    	spin_lock_irqsave(&sched->job_list_lock, flags);
>>>    	list_add_tail(&s_job->node, &sched->ring_mirror_list);
>>>    	drm_sched_start_timeout(sched);
>>> @@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler
>>> *sched, struct drm_sched_job *bad,
>>>    	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node)
>>> {
>>>    		if (s_job->s_fence->parent &&
>>>    		    dma_fence_remove_callback(s_job->s_fence->parent,
>>> -					      &s_job->s_fence->cb)) {
>>> +					      &s_job->cb)) {
>>>    			dma_fence_put(s_job->s_fence->parent);
>>>    			s_job->s_fence->parent = NULL;
>>>    			atomic_dec(&sched->hw_rq_count);
>>> -		}
>>> -		else {
>>> +		} else {
>>>    			/* TODO Is it get/put neccessey here ? */
>>>    			dma_fence_get(&s_job->s_fence->finished);
>>>    			list_add(&s_job->finish_node, &wait_list); @@ -
>>> 417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);  void
>>> drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)  {
>>>    	struct drm_sched_job *s_job, *tmp;
>>> -	unsigned long flags;
>>>    	int r;
>>>
>>>    	if (unpark_only)
>>>    		goto unpark;
>>>
>>> -	spin_lock_irqsave(&sched->job_list_lock, flags);
>>> +	/*
>>> +	 * Locking the list is not required here as the sched thread is parked
>>> +	 * so no new jobs are being pushed in to HW and in drm_sched_stop
>>> we
>>> +	 * flushed all the jobs who were still in mirror list but who already
>>> +	 * signaled and removed them self from the list. Also concurrent
>>> +	 * GPU recovers can't run in parallel.
>>> +	 */
>>>    	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>>> node) {
>>> -		struct drm_sched_fence *s_fence = s_job->s_fence;
>>>    		struct dma_fence *fence = s_job->s_fence->parent;
>>>
>>>    		if (fence) {
>>> -			r = dma_fence_add_callback(fence, &s_fence->cb,
>>> +			r = dma_fence_add_callback(fence, &s_job->cb,
>>>    						   drm_sched_process_job);
>>>    			if (r == -ENOENT)
>>> -				drm_sched_process_job(fence, &s_fence-
>>>> cb);
>>> +				drm_sched_process_job(fence, &s_job->cb);
>>>    			else if (r)
>>>    				DRM_ERROR("fence add callback failed
>>> (%d)\n",
>>>    					  r);
>>>    		} else
>>> -			drm_sched_process_job(NULL, &s_fence->cb);
>>> +			drm_sched_process_job(NULL, &s_job->cb);
>>>    	}
>>>
>>>    	drm_sched_start_timeout(sched);
>>> -	spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>
>>>    unpark:
>>>    	kthread_unpark(sched->thread);
>>> @@ -590,18 +579,27 @@ drm_sched_select_entity(struct
>>> drm_gpu_scheduler *sched)
>>>     */
>>>    static void drm_sched_process_job(struct dma_fence *f, struct
>>> dma_fence_cb *cb)  {
>>> -	struct drm_sched_fence *s_fence =
>>> -		container_of(cb, struct drm_sched_fence, cb);
>>> +	struct drm_sched_job *s_job = container_of(cb, struct
>>> drm_sched_job, cb);
>>> +	struct drm_sched_fence *s_fence = s_job->s_fence;
>>>    	struct drm_gpu_scheduler *sched = s_fence->sched;
>>> +	unsigned long flags;
>>> +
>>> +	cancel_delayed_work(&sched->work_tdr);
>>>
>>> -	dma_fence_get(&s_fence->finished);
>>>    	atomic_dec(&sched->hw_rq_count);
>>>    	atomic_dec(&sched->num_jobs);
>>> +
>>> +	spin_lock_irqsave(&sched->job_list_lock, flags);
>>> +	/* remove job from ring_mirror_list */
>>> +	list_del_init(&s_job->node);
>>> +	spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>> +
>>>    	drm_sched_fence_finished(s_fence);
>>>
>>>    	trace_drm_sched_process_job(s_fence);
>>> -	dma_fence_put(&s_fence->finished);
>>>    	wake_up_interruptible(&sched->wake_up_worker);
>>> +
>>> +	schedule_work(&s_job->finish_work);
>>>    }
>>>
>>>    /**
>>> @@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
>>>
>>>    		if (fence) {
>>>    			s_fence->parent = dma_fence_get(fence);
>>> -			r = dma_fence_add_callback(fence, &s_fence->cb,
>>> +			r = dma_fence_add_callback(fence, &sched_job->cb,
>>>    						   drm_sched_process_job);
>>>    			if (r == -ENOENT)
>>> -				drm_sched_process_job(fence, &s_fence-
>>>> cb);
>>> +				drm_sched_process_job(fence, &sched_job-
>>>> cb);
>>>    			else if (r)
>>>    				DRM_ERROR("fence add callback failed
>>> (%d)\n",
>>>    					  r);
>>>    			dma_fence_put(fence);
>>>    		} else
>>> -			drm_sched_process_job(NULL, &s_fence->cb);
>>> +			drm_sched_process_job(NULL, &sched_job->cb);
>>>
>>>    		wake_up(&sched->job_scheduled);
>>>    	}
>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>> index c94b592..f29aa1c 100644
>>> --- a/include/drm/gpu_scheduler.h
>>> +++ b/include/drm/gpu_scheduler.h
>>> @@ -138,10 +138,6 @@ struct drm_sched_fence {
>>>    	struct dma_fence		finished;
>>>
>>>            /**
>>> -         * @cb: the callback for the parent fence below.
>>> -         */
>>> -	struct dma_fence_cb		cb;
>>> -        /**
>>>             * @parent: the fence returned by &drm_sched_backend_ops.run_job
>>>             * when scheduling the job on hardware. We signal the
>>>             * &drm_sched_fence.finished fence once parent is signalled.
>>> @@ -182,6 +178,7 @@ struct drm_sched_fence
>>> *to_drm_sched_fence(struct dma_fence *f);
>>>     *         be scheduled further.
>>>     * @s_priority: the priority of the job.
>>>     * @entity: the entity to which this job belongs.
>>> + * @cb: the callback for the parent fence in s_fence.
>>>     *
>>>     * A job is created by the driver using drm_sched_job_init(), and
>>>     * should call drm_sched_entity_push_job() once it wants the scheduler
>>> @@ -199,6 +196,7 @@ struct drm_sched_job {
>>>    	atomic_t			karma;
>>>    	enum drm_sched_priority		s_priority;
>>>    	struct drm_sched_entity  *entity;
>>> +	struct dma_fence_cb		cb;
>>>    };
>>>
>>>    static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
>>> --
>>> 2.7.4
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found]                 ` <9a645f9c-93d1-12cc-044b-4bc1ea84d7d9-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-12-12 13:08                   ` Grodzovsky, Andrey
       [not found]                     ` <28bb2244-c2bf-3a3f-18e4-df9aef45cd58-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Grodzovsky, Andrey @ 2018-12-12 13:08 UTC (permalink / raw)
  To: Koenig, Christian, Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Liu, Monk

BTW, the problem I pointed out with drm_sched_entity_kill_jobs_cb is not 
an issue with this patch set since it removes the cb from 
s_fence->finished in general so we only free the job once - directly 
from drm_sched_entity_kill_jobs_cb.

Andrey


On 12/11/2018 11:20 AM, Christian König wrote:
> Yeah, completely correct explained.
>
> I was unfortunately really busy today, but going to give that a look 
> as soon as I have time.
>
> Christian.
>
> Am 11.12.18 um 17:01 schrieb Grodzovsky, Andrey:
>> A I understand you say that by the time the fence callback runs the job
>> might have already been released,
>>
>> but how so if the job gets released from drm_sched_job_finish work
>> handler in the normal flow - so, after the HW
>>
>> fence (s_fence->parent) cb is executed. Other 2 flows are error use
>> cases where amdgpu_job_free is called directly in which
>>
>> cases I assume the job wasn't submitted to HW. Last flow I see is
>> drm_sched_entity_kill_jobs_cb and here I actually see a problem
>>
>> with the code as it's today - drm_sched_fence_finished is called which
>> will trigger s_fence->finished callback to run which today
>>
>> schedules drm_sched_job_finish which releases the job, but we don't even
>> wait for that and call free_job cb directly from
>>
>> after that which seems wrong to me.
>>
>> Andrey
>>
>>
>> On 12/10/2018 09:45 PM, Zhou, David(ChunMing) wrote:
>>> I don't think adding cb to sched job would work as soon as their 
>>> lifetime is different with fence.
>>> Unless you make the sched job reference, otherwise we will get 
>>> trouble sooner or later.
>>>
>>> -David
>>>
>>>> -----Original Message-----
>>>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>>>> Andrey Grodzovsky
>>>> Sent: Tuesday, December 11, 2018 5:44 AM
>>>> To: dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org;
>>>> ckoenig.leichtzumerken@gmail.com; eric@anholt.net;
>>>> etnaviv@lists.freedesktop.org
>>>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; Liu, Monk
>>>> <Monk.Liu@amd.com>; Grodzovsky, Andrey
>>>> <Andrey.Grodzovsky@amd.com>
>>>> Subject: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
>>>>
>>>> Expedite job deletion from ring mirror list to the HW fence signal 
>>>> callback
>>>> instead from finish_work, together with waiting for all such fences 
>>>> to signal in
>>>> drm_sched_stop we garantee that already signaled job will not be 
>>>> processed
>>>> twice.
>>>> Remove the sched finish fence callback and just submit finish_work 
>>>> directly
>>>> from the HW fence callback.
>>>>
>>>> v2: Fix comments.
>>>>
>>>> v3: Attach  hw fence cb to sched_job
>>>>
>>>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>>> ---
>>>>    drivers/gpu/drm/scheduler/sched_main.c | 58 
>>>> ++++++++++++++++----------
>>>> --------
>>>>    include/drm/gpu_scheduler.h            |  6 ++--
>>>>    2 files changed, 30 insertions(+), 34 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
>>>> b/drivers/gpu/drm/scheduler/sched_main.c
>>>> index cdf95e2..f0c1f32 100644
>>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>>> @@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct 
>>>> work_struct
>>>> *work)
>>>>        cancel_delayed_work_sync(&sched->work_tdr);
>>>>
>>>>        spin_lock_irqsave(&sched->job_list_lock, flags);
>>>> -    /* remove job from ring_mirror_list */
>>>> -    list_del_init(&s_job->node);
>>>>        /* queue TDR for next job */
>>>>        drm_sched_start_timeout(sched);
>>>>        spin_unlock_irqrestore(&sched->job_list_lock, flags); @@ 
>>>> -293,22
>>>> +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
>>>>        sched->ops->free_job(s_job);
>>>>    }
>>>>
>>>> -static void drm_sched_job_finish_cb(struct dma_fence *f,
>>>> -                    struct dma_fence_cb *cb)
>>>> -{
>>>> -    struct drm_sched_job *job = container_of(cb, struct 
>>>> drm_sched_job,
>>>> -                         finish_cb);
>>>> -    schedule_work(&job->finish_work);
>>>> -}
>>>> -
>>>>    static void drm_sched_job_begin(struct drm_sched_job *s_job)  {
>>>>        struct drm_gpu_scheduler *sched = s_job->sched;
>>>>        unsigned long flags;
>>>>
>>>> - dma_fence_add_callback(&s_job->s_fence->finished, &s_job-
>>>>> finish_cb,
>>>> -                   drm_sched_job_finish_cb);
>>>> -
>>>>        spin_lock_irqsave(&sched->job_list_lock, flags);
>>>>        list_add_tail(&s_job->node, &sched->ring_mirror_list);
>>>>        drm_sched_start_timeout(sched);
>>>> @@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler
>>>> *sched, struct drm_sched_job *bad,
>>>>        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, 
>>>> node)
>>>> {
>>>>            if (s_job->s_fence->parent &&
>>>> dma_fence_remove_callback(s_job->s_fence->parent,
>>>> -                          &s_job->s_fence->cb)) {
>>>> +                          &s_job->cb)) {
>>>>                dma_fence_put(s_job->s_fence->parent);
>>>>                s_job->s_fence->parent = NULL;
>>>>                atomic_dec(&sched->hw_rq_count);
>>>> -        }
>>>> -        else {
>>>> +        } else {
>>>>                /* TODO Is it get/put neccessey here ? */
>>>> dma_fence_get(&s_job->s_fence->finished);
>>>>                list_add(&s_job->finish_node, &wait_list); @@ -
>>>> 417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);  void
>>>> drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)  {
>>>>        struct drm_sched_job *s_job, *tmp;
>>>> -    unsigned long flags;
>>>>        int r;
>>>>
>>>>        if (unpark_only)
>>>>            goto unpark;
>>>>
>>>> -    spin_lock_irqsave(&sched->job_list_lock, flags);
>>>> +    /*
>>>> +     * Locking the list is not required here as the sched thread 
>>>> is parked
>>>> +     * so no new jobs are being pushed in to HW and in drm_sched_stop
>>>> we
>>>> +     * flushed all the jobs who were still in mirror list but who 
>>>> already
>>>> +     * signaled and removed them self from the list. Also concurrent
>>>> +     * GPU recovers can't run in parallel.
>>>> +     */
>>>>        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>>>> node) {
>>>> -        struct drm_sched_fence *s_fence = s_job->s_fence;
>>>>            struct dma_fence *fence = s_job->s_fence->parent;
>>>>
>>>>            if (fence) {
>>>> -            r = dma_fence_add_callback(fence, &s_fence->cb,
>>>> +            r = dma_fence_add_callback(fence, &s_job->cb,
>>>>                               drm_sched_process_job);
>>>>                if (r == -ENOENT)
>>>> -                drm_sched_process_job(fence, &s_fence-
>>>>> cb);
>>>> +                drm_sched_process_job(fence, &s_job->cb);
>>>>                else if (r)
>>>>                    DRM_ERROR("fence add callback failed
>>>> (%d)\n",
>>>>                          r);
>>>>            } else
>>>> -            drm_sched_process_job(NULL, &s_fence->cb);
>>>> +            drm_sched_process_job(NULL, &s_job->cb);
>>>>        }
>>>>
>>>>        drm_sched_start_timeout(sched);
>>>> -    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>>
>>>>    unpark:
>>>>        kthread_unpark(sched->thread);
>>>> @@ -590,18 +579,27 @@ drm_sched_select_entity(struct
>>>> drm_gpu_scheduler *sched)
>>>>     */
>>>>    static void drm_sched_process_job(struct dma_fence *f, struct
>>>> dma_fence_cb *cb)  {
>>>> -    struct drm_sched_fence *s_fence =
>>>> -        container_of(cb, struct drm_sched_fence, cb);
>>>> +    struct drm_sched_job *s_job = container_of(cb, struct
>>>> drm_sched_job, cb);
>>>> +    struct drm_sched_fence *s_fence = s_job->s_fence;
>>>>        struct drm_gpu_scheduler *sched = s_fence->sched;
>>>> +    unsigned long flags;
>>>> +
>>>> +    cancel_delayed_work(&sched->work_tdr);
>>>>
>>>> -    dma_fence_get(&s_fence->finished);
>>>>        atomic_dec(&sched->hw_rq_count);
>>>>        atomic_dec(&sched->num_jobs);
>>>> +
>>>> +    spin_lock_irqsave(&sched->job_list_lock, flags);
>>>> +    /* remove job from ring_mirror_list */
>>>> +    list_del_init(&s_job->node);
>>>> +    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>> +
>>>>        drm_sched_fence_finished(s_fence);
>>>>
>>>>        trace_drm_sched_process_job(s_fence);
>>>> -    dma_fence_put(&s_fence->finished);
>>>>        wake_up_interruptible(&sched->wake_up_worker);
>>>> +
>>>> +    schedule_work(&s_job->finish_work);
>>>>    }
>>>>
>>>>    /**
>>>> @@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
>>>>
>>>>            if (fence) {
>>>>                s_fence->parent = dma_fence_get(fence);
>>>> -            r = dma_fence_add_callback(fence, &s_fence->cb,
>>>> +            r = dma_fence_add_callback(fence, &sched_job->cb,
>>>>                               drm_sched_process_job);
>>>>                if (r == -ENOENT)
>>>> -                drm_sched_process_job(fence, &s_fence-
>>>>> cb);
>>>> +                drm_sched_process_job(fence, &sched_job-
>>>>> cb);
>>>>                else if (r)
>>>>                    DRM_ERROR("fence add callback failed
>>>> (%d)\n",
>>>>                          r);
>>>>                dma_fence_put(fence);
>>>>            } else
>>>> -            drm_sched_process_job(NULL, &s_fence->cb);
>>>> +            drm_sched_process_job(NULL, &sched_job->cb);
>>>>
>>>>            wake_up(&sched->job_scheduled);
>>>>        }
>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>> index c94b592..f29aa1c 100644
>>>> --- a/include/drm/gpu_scheduler.h
>>>> +++ b/include/drm/gpu_scheduler.h
>>>> @@ -138,10 +138,6 @@ struct drm_sched_fence {
>>>>        struct dma_fence        finished;
>>>>
>>>>            /**
>>>> -         * @cb: the callback for the parent fence below.
>>>> -         */
>>>> -    struct dma_fence_cb        cb;
>>>> -        /**
>>>>             * @parent: the fence returned by 
>>>> &drm_sched_backend_ops.run_job
>>>>             * when scheduling the job on hardware. We signal the
>>>>             * &drm_sched_fence.finished fence once parent is 
>>>> signalled.
>>>> @@ -182,6 +178,7 @@ struct drm_sched_fence
>>>> *to_drm_sched_fence(struct dma_fence *f);
>>>>     *         be scheduled further.
>>>>     * @s_priority: the priority of the job.
>>>>     * @entity: the entity to which this job belongs.
>>>> + * @cb: the callback for the parent fence in s_fence.
>>>>     *
>>>>     * A job is created by the driver using drm_sched_job_init(), and
>>>>     * should call drm_sched_entity_push_job() once it wants the 
>>>> scheduler
>>>> @@ -199,6 +196,7 @@ struct drm_sched_job {
>>>>        atomic_t            karma;
>>>>        enum drm_sched_priority        s_priority;
>>>>        struct drm_sched_entity  *entity;
>>>> +    struct dma_fence_cb        cb;
>>>>    };
>>>>
>>>>    static inline bool drm_sched_invalidate_job(struct drm_sched_job 
>>>> *s_job,
>>>> -- 
>>>> 2.7.4
>>>>
>>>> _______________________________________________
>>>> amd-gfx mailing list
>>>> amd-gfx@lists.freedesktop.org
>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
       [not found]                     ` <28bb2244-c2bf-3a3f-18e4-df9aef45cd58-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-14 15:14                       ` Grodzovsky, Andrey
  0 siblings, 0 replies; 10+ messages in thread
From: Grodzovsky, Andrey @ 2018-12-14 15:14 UTC (permalink / raw)
  To: Koenig, Christian, Zhou, David(ChunMing),
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Liu, Monk

Just a reminder. Any new comments in light of all the discussion?

Andrey


On 12/12/2018 08:08 AM, Grodzovsky, Andrey wrote:
> BTW, the problem I pointed out with drm_sched_entity_kill_jobs_cb is not
> an issue with this patch set since it removes the cb from
> s_fence->finished in general so we only free the job once - directly
> from drm_sched_entity_kill_jobs_cb.
>
> Andrey
>
>
> On 12/11/2018 11:20 AM, Christian König wrote:
>> Yeah, completely correct explained.
>>
>> I was unfortunately really busy today, but going to give that a look
>> as soon as I have time.
>>
>> Christian.
>>
>> Am 11.12.18 um 17:01 schrieb Grodzovsky, Andrey:
>>> As I understand, you say that by the time the fence callback runs the job
>>> might have already been released,
>>>
>>> but how so if the job gets released from drm_sched_job_finish work
>>> handler in the normal flow - so, after the HW
>>>
>>> fence (s_fence->parent) cb is executed. Other 2 flows are error use
>>> cases where amdgpu_job_free is called directly in which
>>>
>>> cases I assume the job wasn't submitted to HW. Last flow I see is
>>> drm_sched_entity_kill_jobs_cb and here I actually see a problem
>>>
>>> with the code as it's today - drm_sched_fence_finished is called which
>>> will trigger s_fence->finished callback to run which today
>>>
>>> schedules drm_sched_job_finish which releases the job, but we don't even
>>> wait for that and call free_job cb directly from
>>>
>>> after that which seems wrong to me.
>>>
>>> Andrey
>>>
>>>
>>> On 12/10/2018 09:45 PM, Zhou, David(ChunMing) wrote:
>>>> I don't think adding cb to sched job would work as soon as their
>>>> lifetime is different with fence.
>>>> Unless you make the sched job reference, otherwise we will get
>>>> trouble sooner or later.
>>>>
>>>> -David
>>>>
>>>>> -----Original Message-----
>>>>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>>>>> Andrey Grodzovsky
>>>>> Sent: Tuesday, December 11, 2018 5:44 AM
>>>>> To: dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org;
>>>>> ckoenig.leichtzumerken@gmail.com; eric@anholt.net;
>>>>> etnaviv@lists.freedesktop.org
>>>>> Cc: Zhou, David(ChunMing) <David1.Zhou@amd.com>; Liu, Monk
>>>>> <Monk.Liu@amd.com>; Grodzovsky, Andrey
>>>>> <Andrey.Grodzovsky@amd.com>
>>>>> Subject: [PATCH v3 2/2] drm/sched: Rework HW fence processing.
>>>>>
>>>>> Expedite job deletion from ring mirror list to the HW fence signal
>>>>> callback
>>>>> instead from finish_work, together with waiting for all such fences
>>>>> to signal in
>>>>> drm_sched_stop we guarantee that an already signaled job will not be
>>>>> processed
>>>>> twice.
>>>>> Remove the sched finish fence callback and just submit finish_work
>>>>> directly
>>>>> from the HW fence callback.
>>>>>
>>>>> v2: Fix comments.
>>>>>
>>>>> v3: Attach  hw fence cb to sched_job
>>>>>
>>>>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>>>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>>>> ---
>>>>>     drivers/gpu/drm/scheduler/sched_main.c | 58
>>>>> ++++++++++++++++----------
>>>>> --------
>>>>>     include/drm/gpu_scheduler.h            |  6 ++--
>>>>>     2 files changed, 30 insertions(+), 34 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
>>>>> b/drivers/gpu/drm/scheduler/sched_main.c
>>>>> index cdf95e2..f0c1f32 100644
>>>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>>>> @@ -284,8 +284,6 @@ static void drm_sched_job_finish(struct
>>>>> work_struct
>>>>> *work)
>>>>>         cancel_delayed_work_sync(&sched->work_tdr);
>>>>>
>>>>>         spin_lock_irqsave(&sched->job_list_lock, flags);
>>>>> -    /* remove job from ring_mirror_list */
>>>>> -    list_del_init(&s_job->node);
>>>>>         /* queue TDR for next job */
>>>>>         drm_sched_start_timeout(sched);
>>>>>         spin_unlock_irqrestore(&sched->job_list_lock, flags); @@
>>>>> -293,22
>>>>> +291,11 @@ static void drm_sched_job_finish(struct work_struct *work)
>>>>>         sched->ops->free_job(s_job);
>>>>>     }
>>>>>
>>>>> -static void drm_sched_job_finish_cb(struct dma_fence *f,
>>>>> -                    struct dma_fence_cb *cb)
>>>>> -{
>>>>> -    struct drm_sched_job *job = container_of(cb, struct
>>>>> drm_sched_job,
>>>>> -                         finish_cb);
>>>>> -    schedule_work(&job->finish_work);
>>>>> -}
>>>>> -
>>>>>     static void drm_sched_job_begin(struct drm_sched_job *s_job)  {
>>>>>         struct drm_gpu_scheduler *sched = s_job->sched;
>>>>>         unsigned long flags;
>>>>>
>>>>> - dma_fence_add_callback(&s_job->s_fence->finished, &s_job-
>>>>>> finish_cb,
>>>>> -                   drm_sched_job_finish_cb);
>>>>> -
>>>>>         spin_lock_irqsave(&sched->job_list_lock, flags);
>>>>>         list_add_tail(&s_job->node, &sched->ring_mirror_list);
>>>>>         drm_sched_start_timeout(sched);
>>>>> @@ -359,12 +346,11 @@ void drm_sched_stop(struct drm_gpu_scheduler
>>>>> *sched, struct drm_sched_job *bad,
>>>>>         list_for_each_entry_reverse(s_job, &sched->ring_mirror_list,
>>>>> node)
>>>>> {
>>>>>             if (s_job->s_fence->parent &&
>>>>> dma_fence_remove_callback(s_job->s_fence->parent,
>>>>> -                          &s_job->s_fence->cb)) {
>>>>> +                          &s_job->cb)) {
>>>>>                 dma_fence_put(s_job->s_fence->parent);
>>>>>                 s_job->s_fence->parent = NULL;
>>>>>                 atomic_dec(&sched->hw_rq_count);
>>>>> -        }
>>>>> -        else {
>>>>> +        } else {
>>>>>                 /* TODO Is it get/put neccessey here ? */
>>>>> dma_fence_get(&s_job->s_fence->finished);
>>>>>                 list_add(&s_job->finish_node, &wait_list); @@ -
>>>>> 417,31 +403,34 @@ EXPORT_SYMBOL(drm_sched_stop);  void
>>>>> drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)  {
>>>>>         struct drm_sched_job *s_job, *tmp;
>>>>> -    unsigned long flags;
>>>>>         int r;
>>>>>
>>>>>         if (unpark_only)
>>>>>             goto unpark;
>>>>>
>>>>> -    spin_lock_irqsave(&sched->job_list_lock, flags);
>>>>> +    /*
>>>>> +     * Locking the list is not required here as the sched thread
>>>>> is parked
>>>>> +     * so no new jobs are being pushed in to HW and in drm_sched_stop
>>>>> we
>>>>> +     * flushed all the jobs who were still in mirror list but who
>>>>> already
>>>>> +     * signaled and removed them self from the list. Also concurrent
>>>>> +     * GPU recovers can't run in parallel.
>>>>> +     */
>>>>>         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>>>>> node) {
>>>>> -        struct drm_sched_fence *s_fence = s_job->s_fence;
>>>>>             struct dma_fence *fence = s_job->s_fence->parent;
>>>>>
>>>>>             if (fence) {
>>>>> -            r = dma_fence_add_callback(fence, &s_fence->cb,
>>>>> +            r = dma_fence_add_callback(fence, &s_job->cb,
>>>>>                                drm_sched_process_job);
>>>>>                 if (r == -ENOENT)
>>>>> -                drm_sched_process_job(fence, &s_fence-
>>>>>> cb);
>>>>> +                drm_sched_process_job(fence, &s_job->cb);
>>>>>                 else if (r)
>>>>>                     DRM_ERROR("fence add callback failed
>>>>> (%d)\n",
>>>>>                           r);
>>>>>             } else
>>>>> -            drm_sched_process_job(NULL, &s_fence->cb);
>>>>> +            drm_sched_process_job(NULL, &s_job->cb);
>>>>>         }
>>>>>
>>>>>         drm_sched_start_timeout(sched);
>>>>> -    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>>>
>>>>>     unpark:
>>>>>         kthread_unpark(sched->thread);
>>>>> @@ -590,18 +579,27 @@ drm_sched_select_entity(struct
>>>>> drm_gpu_scheduler *sched)
>>>>>      */
>>>>>     static void drm_sched_process_job(struct dma_fence *f, struct
>>>>> dma_fence_cb *cb)  {
>>>>> -    struct drm_sched_fence *s_fence =
>>>>> -        container_of(cb, struct drm_sched_fence, cb);
>>>>> +    struct drm_sched_job *s_job = container_of(cb, struct
>>>>> drm_sched_job, cb);
>>>>> +    struct drm_sched_fence *s_fence = s_job->s_fence;
>>>>>         struct drm_gpu_scheduler *sched = s_fence->sched;
>>>>> +    unsigned long flags;
>>>>> +
>>>>> +    cancel_delayed_work(&sched->work_tdr);
>>>>>
>>>>> -    dma_fence_get(&s_fence->finished);
>>>>>         atomic_dec(&sched->hw_rq_count);
>>>>>         atomic_dec(&sched->num_jobs);
>>>>> +
>>>>> +    spin_lock_irqsave(&sched->job_list_lock, flags);
>>>>> +    /* remove job from ring_mirror_list */
>>>>> +    list_del_init(&s_job->node);
>>>>> +    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>>> +
>>>>>         drm_sched_fence_finished(s_fence);
>>>>>
>>>>>         trace_drm_sched_process_job(s_fence);
>>>>> -    dma_fence_put(&s_fence->finished);
>>>>>         wake_up_interruptible(&sched->wake_up_worker);
>>>>> +
>>>>> +    schedule_work(&s_job->finish_work);
>>>>>     }
>>>>>
>>>>>     /**
>>>>> @@ -664,16 +662,16 @@ static int drm_sched_main(void *param)
>>>>>
>>>>>             if (fence) {
>>>>>                 s_fence->parent = dma_fence_get(fence);
>>>>> -            r = dma_fence_add_callback(fence, &s_fence->cb,
>>>>> +            r = dma_fence_add_callback(fence, &sched_job->cb,
>>>>>                                drm_sched_process_job);
>>>>>                 if (r == -ENOENT)
>>>>> -                drm_sched_process_job(fence, &s_fence-
>>>>>> cb);
>>>>> +                drm_sched_process_job(fence, &sched_job-
>>>>>> cb);
>>>>>                 else if (r)
>>>>>                     DRM_ERROR("fence add callback failed
>>>>> (%d)\n",
>>>>>                           r);
>>>>>                 dma_fence_put(fence);
>>>>>             } else
>>>>> -            drm_sched_process_job(NULL, &s_fence->cb);
>>>>> +            drm_sched_process_job(NULL, &sched_job->cb);
>>>>>
>>>>>             wake_up(&sched->job_scheduled);
>>>>>         }
>>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>>> index c94b592..f29aa1c 100644
>>>>> --- a/include/drm/gpu_scheduler.h
>>>>> +++ b/include/drm/gpu_scheduler.h
>>>>> @@ -138,10 +138,6 @@ struct drm_sched_fence {
>>>>>         struct dma_fence        finished;
>>>>>
>>>>>             /**
>>>>> -         * @cb: the callback for the parent fence below.
>>>>> -         */
>>>>> -    struct dma_fence_cb        cb;
>>>>> -        /**
>>>>>              * @parent: the fence returned by
>>>>> &drm_sched_backend_ops.run_job
>>>>>              * when scheduling the job on hardware. We signal the
>>>>>              * &drm_sched_fence.finished fence once parent is
>>>>> signalled.
>>>>> @@ -182,6 +178,7 @@ struct drm_sched_fence
>>>>> *to_drm_sched_fence(struct dma_fence *f);
>>>>>      *         be scheduled further.
>>>>>      * @s_priority: the priority of the job.
>>>>>      * @entity: the entity to which this job belongs.
>>>>> + * @cb: the callback for the parent fence in s_fence.
>>>>>      *
>>>>>      * A job is created by the driver using drm_sched_job_init(), and
>>>>>      * should call drm_sched_entity_push_job() once it wants the
>>>>> scheduler
>>>>> @@ -199,6 +196,7 @@ struct drm_sched_job {
>>>>>         atomic_t            karma;
>>>>>         enum drm_sched_priority        s_priority;
>>>>>         struct drm_sched_entity  *entity;
>>>>> +    struct dma_fence_cb        cb;
>>>>>     };
>>>>>
>>>>>     static inline bool drm_sched_invalidate_job(struct drm_sched_job
>>>>> *s_job,
>>>>> -- 
>>>>> 2.7.4
>>>>>
>>>>> _______________________________________________
>>>>> amd-gfx mailing list
>>>>> amd-gfx@lists.freedesktop.org
>>>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling.
       [not found] ` <1544478238-13310-1-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
  2018-12-10 21:43   ` [PATCH v3 2/2] drm/sched: Rework HW fence processing Andrey Grodzovsky
@ 2018-12-17 15:27   ` Christian König
  2018-12-17 16:57     ` Grodzovsky, Andrey
  1 sibling, 1 reply; 10+ messages in thread
From: Christian König @ 2018-12-17 15:27 UTC (permalink / raw)
  To: Andrey Grodzovsky, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: david1.zhou-5C7GfCeVMHo, Monk.Liu-5C7GfCeVMHo

Am 10.12.18 um 22:43 schrieb Andrey Grodzovsky:
> Decouple sched threads stop and start and ring mirror
> list handling from the policy of what to do about the
> guilty jobs.
> When stopping the sched thread and detaching sched fences
> from non-signaled HW fences, wait for all signaled HW fences
> to complete before rerunning the jobs.
>
> v2: Fix resubmission of guilty job into HW after refactoring.
>
> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  17 +++--
>   drivers/gpu/drm/etnaviv/etnaviv_sched.c    |   8 +--
>   drivers/gpu/drm/scheduler/sched_main.c     | 110 ++++++++++++++++++-----------
>   drivers/gpu/drm/v3d/v3d_sched.c            |  11 +--
>   include/drm/gpu_scheduler.h                |  10 ++-
>   5 files changed, 95 insertions(+), 61 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index ef36cc5..42111d5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3292,17 +3292,16 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
>   	/* block all schedulers and reset given job's ring */
>   	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>   		struct amdgpu_ring *ring = adev->rings[i];
> +		bool park_only = job && job->base.sched != &ring->sched;
>   
>   		if (!ring || !ring->sched.thread)
>   			continue;
>   
> -		kthread_park(ring->sched.thread);
> +		drm_sched_stop(&ring->sched, job ? &job->base : NULL, park_only);
>   
> -		if (job && job->base.sched != &ring->sched)
> +		if (park_only)
>   			continue;
>   
> -		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
> -
>   		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
>   		amdgpu_fence_driver_force_completion(ring);
>   	}
> @@ -3445,6 +3444,7 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>   					  struct amdgpu_job *job)
>   {
>   	int i;
> +	bool unpark_only;
>   
>   	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>   		struct amdgpu_ring *ring = adev->rings[i];
> @@ -3456,10 +3456,13 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>   		 * or all rings (in the case @job is NULL)
>   		 * after above amdgpu_reset accomplished
>   		 */
> -		if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
> -			drm_sched_job_recovery(&ring->sched);
> +		unpark_only = (job && job->base.sched != &ring->sched) ||
> +			       adev->asic_reset_res;
> +
> +		if (!unpark_only)
> +			drm_sched_resubmit_jobs(&ring->sched);
>   
> -		kthread_unpark(ring->sched.thread);
> +		drm_sched_start(&ring->sched, unpark_only);
>   	}
>   
>   	if (!amdgpu_device_has_dc_support(adev)) {
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 49a6763..fab3b51 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -109,16 +109,16 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
>   	}
>   
>   	/* block scheduler */
> -	kthread_park(gpu->sched.thread);
> -	drm_sched_hw_job_reset(&gpu->sched, sched_job);
> +	drm_sched_stop(&gpu->sched, sched_job, false);
>   
>   	/* get the GPU back into the init state */
>   	etnaviv_core_dump(gpu);
>   	etnaviv_gpu_recover_hang(gpu);
>   
> +	drm_sched_resubmit_jobs(&gpu->sched);
> +
>   	/* restart scheduler after GPU is usable again */
> -	drm_sched_job_recovery(&gpu->sched);
> -	kthread_unpark(gpu->sched.thread);
> +	drm_sched_start(&gpu->sched);
>   }
>   
>   static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index dbb6906..cdf95e2 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -60,8 +60,6 @@
>   
>   static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
>   
> -static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
> -
>   /**
>    * drm_sched_rq_init - initialize a given run queue struct
>    *
> @@ -342,13 +340,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
>    * @bad: bad scheduler job
>    *
>    */
> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
> +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad,
> +		    bool park_only)
>   {
>   	struct drm_sched_job *s_job;
>   	struct drm_sched_entity *entity, *tmp;
>   	unsigned long flags;
> +	struct list_head wait_list;
>   	int i;
>   
> +	kthread_park(sched->thread);
> +	if (park_only)
> +		return;

Removing the callback needs to be done for all engines, not just the one 
where the bad job is on.

> +
> +	INIT_LIST_HEAD(&wait_list);
> +
>   	spin_lock_irqsave(&sched->job_list_lock, flags);
>   	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
>   		if (s_job->s_fence->parent &&
> @@ -358,9 +364,24 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
>   			s_job->s_fence->parent = NULL;
>   			atomic_dec(&sched->hw_rq_count);
>   		}
> +		else {
> +			/* TODO Is it get/put neccessey here ? */
> +			dma_fence_get(&s_job->s_fence->finished);
> +			list_add(&s_job->finish_node, &wait_list);
> +		}
>   	}
>   	spin_unlock_irqrestore(&sched->job_list_lock, flags);
>   
> +	/*
> +	 * Verify all the signaled jobs in mirror list are removed from the ring
> +	 * We rely on the fact that any finish_work in progress will wait for this
> +	 * handler to complete before releasing all of the jobs we iterate.
> +	 */
> +	list_for_each_entry(s_job, &wait_list, finish_node) {
> +		dma_fence_wait(&s_job->s_fence->finished, false);
> +		dma_fence_put(&s_job->s_fence->finished);
> +	}
> +

Increasing a bad jobs karma should be a separate function.

Christian.

>   	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
>   		atomic_inc(&bad->karma);
>   		/* don't increase @bad's karma if it's from KERNEL RQ,
> @@ -385,7 +406,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
>   		}
>   	}
>   }
> -EXPORT_SYMBOL(drm_sched_hw_job_reset);
> +EXPORT_SYMBOL(drm_sched_stop);
>   
>   /**
>    * drm_sched_job_recovery - recover jobs after a reset
> @@ -393,33 +414,21 @@ EXPORT_SYMBOL(drm_sched_hw_job_reset);
>    * @sched: scheduler instance
>    *
>    */
> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)
>   {
>   	struct drm_sched_job *s_job, *tmp;
> -	bool found_guilty = false;
>   	unsigned long flags;
>   	int r;
>   
> -	spin_lock_irqsave(&sched->job_list_lock, flags);
> +	if (unpark_only)
> +		goto unpark;
> +
> +	spin_lock_irqsave(&sched->job_list_lock, flags);	
>   	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
>   		struct drm_sched_fence *s_fence = s_job->s_fence;
> -		struct dma_fence *fence;
> -		uint64_t guilty_context;
> -
> -		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
> -			found_guilty = true;
> -			guilty_context = s_job->s_fence->scheduled.context;
> -		}
> -
> -		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
> -			dma_fence_set_error(&s_fence->finished, -ECANCELED);
> -
> -		spin_unlock_irqrestore(&sched->job_list_lock, flags);
> -		fence = sched->ops->run_job(s_job);
> -		atomic_inc(&sched->hw_rq_count);
> +		struct dma_fence *fence = s_job->s_fence->parent;
>   
>   		if (fence) {
> -			s_fence->parent = dma_fence_get(fence);
>   			r = dma_fence_add_callback(fence, &s_fence->cb,
>   						   drm_sched_process_job);
>   			if (r == -ENOENT)
> @@ -427,18 +436,47 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
>   			else if (r)
>   				DRM_ERROR("fence add callback failed (%d)\n",
>   					  r);
> -			dma_fence_put(fence);
> -		} else {
> -			if (s_fence->finished.error < 0)
> -				drm_sched_expel_job_unlocked(s_job);
> +		} else
>   			drm_sched_process_job(NULL, &s_fence->cb);
> -		}
> -		spin_lock_irqsave(&sched->job_list_lock, flags);
>   	}
> +
>   	drm_sched_start_timeout(sched);
>   	spin_unlock_irqrestore(&sched->job_list_lock, flags);
> +
> +unpark:
> +	kthread_unpark(sched->thread);
>   }
> -EXPORT_SYMBOL(drm_sched_job_recovery);
> +EXPORT_SYMBOL(drm_sched_start);
> +
> +/**
> + * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
> + *
> + * @sched: scheduler instance
> + *
> + */
> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
> +{
> +	struct drm_sched_job *s_job, *tmp;
> +	uint64_t guilty_context;
> +	bool found_guilty = false;
> +
> +	/*TODO DO we need spinlock here ? */
> +	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
> +		struct drm_sched_fence *s_fence = s_job->s_fence;
> +
> +		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
> +			found_guilty = true;
> +			guilty_context = s_job->s_fence->scheduled.context;
> +		}
> +
> +		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
> +			dma_fence_set_error(&s_fence->finished, -ECANCELED);
> +
> +		s_job->s_fence->parent = sched->ops->run_job(s_job);
> +		atomic_inc(&sched->hw_rq_count);
> +	}
> +}
> +EXPORT_SYMBOL(drm_sched_resubmit_jobs);
>   
>   /**
>    * drm_sched_job_init - init a scheduler job
> @@ -634,26 +672,14 @@ static int drm_sched_main(void *param)
>   				DRM_ERROR("fence add callback failed (%d)\n",
>   					  r);
>   			dma_fence_put(fence);
> -		} else {
> -			if (s_fence->finished.error < 0)
> -				drm_sched_expel_job_unlocked(sched_job);
> +		} else
>   			drm_sched_process_job(NULL, &s_fence->cb);
> -		}
>   
>   		wake_up(&sched->job_scheduled);
>   	}
>   	return 0;
>   }
>   
> -static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
> -{
> -	struct drm_gpu_scheduler *sched = s_job->sched;
> -
> -	spin_lock(&sched->job_list_lock);
> -	list_del_init(&s_job->node);
> -	spin_unlock(&sched->job_list_lock);
> -}
> -
>   /**
>    * drm_sched_init - Init a gpu scheduler instance
>    *
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 445b2ef..f99346a 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -178,18 +178,19 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
>   	for (q = 0; q < V3D_MAX_QUEUES; q++) {
>   		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
>   
> -		kthread_park(sched->thread);
> -		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
> -					       sched_job : NULL));
> +		drm_sched_stop(sched, (sched_job->sched == sched ?
> +					       sched_job : NULL), false);
>   	}
>   
>   	/* get the GPU back into the init state */
>   	v3d_reset(v3d);
>   
> +	for (q = 0; q < V3D_MAX_QUEUES; q++)
> +		drm_sched_resubmit_jobs(sched_job->sched);
> +
>   	/* Unblock schedulers and restart their jobs. */
>   	for (q = 0; q < V3D_MAX_QUEUES; q++) {
> -		drm_sched_job_recovery(&v3d->queue[q].sched);
> -		kthread_unpark(v3d->queue[q].sched.thread);
> +		drm_sched_start(&v3d->queue[q].sched, false);
>   	}
>   
>   	mutex_unlock(&v3d->reset_lock);
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 47e1979..c94b592 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -175,6 +175,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
>    *               finished to remove the job from the
>    *               @drm_gpu_scheduler.ring_mirror_list.
>    * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
> + * @finish_node: used in a list to wait on before resetting the scheduler
>    * @id: a unique id assigned to each job scheduled on the scheduler.
>    * @karma: increment on every hang caused by this job. If this exceeds the hang
>    *         limit of the scheduler then the job is marked guilty and will not
> @@ -193,6 +194,7 @@ struct drm_sched_job {
>   	struct dma_fence_cb		finish_cb;
>   	struct work_struct		finish_work;
>   	struct list_head		node;
> +	struct list_head		finish_node;
>   	uint64_t			id;
>   	atomic_t			karma;
>   	enum drm_sched_priority		s_priority;
> @@ -298,9 +300,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
>   		       void *owner);
>   void drm_sched_job_cleanup(struct drm_sched_job *job);
>   void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
> -			    struct drm_sched_job *job);
> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
> +void drm_sched_stop(struct drm_gpu_scheduler *sched,
> +		    struct drm_sched_job *job,
> +		    bool park_only);
> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only);
> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
>   bool drm_sched_dependency_optimized(struct dma_fence* fence,
>   				    struct drm_sched_entity *entity);
>   void drm_sched_fault(struct drm_gpu_scheduler *sched);

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling.
  2018-12-17 15:27   ` [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling Christian König
@ 2018-12-17 16:57     ` Grodzovsky, Andrey
       [not found]       ` <975594c7-3f8f-60f1-77b2-98d23b1a3646-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Grodzovsky, Andrey @ 2018-12-17 16:57 UTC (permalink / raw)
  To: Koenig, Christian, dri-devel, amd-gfx, eric, etnaviv; +Cc: Liu, Monk



On 12/17/2018 10:27 AM, Christian König wrote:
> Am 10.12.18 um 22:43 schrieb Andrey Grodzovsky:
>> Decouple sched threads stop and start and ring mirror
>> list handling from the policy of what to do about the
>> guilty jobs.
>> When stopping the sched thread and detaching sched fences
>> from non signaled HW fences wait for all signaled HW fences
>> to complete before rerunning the jobs.
>>
>> v2: Fix resubmission of guilty job into HW after refactoring.
>>
>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  17 +++--
>>   drivers/gpu/drm/etnaviv/etnaviv_sched.c    |   8 +--
>>   drivers/gpu/drm/scheduler/sched_main.c     | 110 
>> ++++++++++++++++++-----------
>>   drivers/gpu/drm/v3d/v3d_sched.c            |  11 +--
>>   include/drm/gpu_scheduler.h                |  10 ++-
>>   5 files changed, 95 insertions(+), 61 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index ef36cc5..42111d5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -3292,17 +3292,16 @@ static int 
>> amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
>>       /* block all schedulers and reset given job's ring */
>>       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>>           struct amdgpu_ring *ring = adev->rings[i];
>> +        bool park_only = job && job->base.sched != &ring->sched;
>>             if (!ring || !ring->sched.thread)
>>               continue;
>>   -        kthread_park(ring->sched.thread);
>> +        drm_sched_stop(&ring->sched, job ? &job->base : NULL, 
>> park_only);
>>   -        if (job && job->base.sched != &ring->sched)
>> +        if (park_only)
>>               continue;
>>   -        drm_sched_hw_job_reset(&ring->sched, job ? &job->base : 
>> NULL);
>> -
>>           /* after all hw jobs are reset, hw fence is meaningless, so 
>> force_completion */
>>           amdgpu_fence_driver_force_completion(ring);
>>       }
>> @@ -3445,6 +3444,7 @@ static void 
>> amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>>                         struct amdgpu_job *job)
>>   {
>>       int i;
>> +    bool unpark_only;
>>         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>>           struct amdgpu_ring *ring = adev->rings[i];
>> @@ -3456,10 +3456,13 @@ static void 
>> amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>>            * or all rings (in the case @job is NULL)
>>            * after above amdgpu_reset accomplished
>>            */
>> -        if ((!job || job->base.sched == &ring->sched) && 
>> !adev->asic_reset_res)
>> -            drm_sched_job_recovery(&ring->sched);
>> +        unpark_only = (job && job->base.sched != &ring->sched) ||
>> +                   adev->asic_reset_res;
>> +
>> +        if (!unpark_only)
>> +            drm_sched_resubmit_jobs(&ring->sched);
>>   -        kthread_unpark(ring->sched.thread);
>> +        drm_sched_start(&ring->sched, unpark_only);
>>       }
>>         if (!amdgpu_device_has_dc_support(adev)) {
>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c 
>> b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>> index 49a6763..fab3b51 100644
>> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>> @@ -109,16 +109,16 @@ static void etnaviv_sched_timedout_job(struct 
>> drm_sched_job *sched_job)
>>       }
>>         /* block scheduler */
>> -    kthread_park(gpu->sched.thread);
>> -    drm_sched_hw_job_reset(&gpu->sched, sched_job);
>> +    drm_sched_stop(&gpu->sched, sched_job, false);
>>         /* get the GPU back into the init state */
>>       etnaviv_core_dump(gpu);
>>       etnaviv_gpu_recover_hang(gpu);
>>   +    drm_sched_resubmit_jobs(&gpu->sched);
>> +
>>       /* restart scheduler after GPU is usable again */
>> -    drm_sched_job_recovery(&gpu->sched);
>> -    kthread_unpark(gpu->sched.thread);
>> +    drm_sched_start(&gpu->sched);
>>   }
>>     static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
>> b/drivers/gpu/drm/scheduler/sched_main.c
>> index dbb6906..cdf95e2 100644
>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>> @@ -60,8 +60,6 @@
>>     static void drm_sched_process_job(struct dma_fence *f, struct 
>> dma_fence_cb *cb);
>>   -static void drm_sched_expel_job_unlocked(struct drm_sched_job 
>> *s_job);
>> -
>>   /**
>>    * drm_sched_rq_init - initialize a given run queue struct
>>    *
>> @@ -342,13 +340,21 @@ static void drm_sched_job_timedout(struct 
>> work_struct *work)
>>    * @bad: bad scheduler job
>>    *
>>    */
>> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct 
>> drm_sched_job *bad)
>> +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct 
>> drm_sched_job *bad,
>> +            bool park_only)
>>   {
>>       struct drm_sched_job *s_job;
>>       struct drm_sched_entity *entity, *tmp;
>>       unsigned long flags;
>> +    struct list_head wait_list;
>>       int i;
>>   +    kthread_park(sched->thread);
>> +    if (park_only)
>> +        return;
>
> Removing the callback needs to be done for all engines, not just the 
> one where the bad job is on.

Is it because you assume that during full GPU reset we might kill 
healthy jobs that were still in progress in the HW pipe, and so we need 
to manually recover them
just as we do with the guilty job's engine ?

Andrey

>
>> +
>> +    INIT_LIST_HEAD(&wait_list);
>> +
>>       spin_lock_irqsave(&sched->job_list_lock, flags);
>>       list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, 
>> node) {
>>           if (s_job->s_fence->parent &&
>> @@ -358,9 +364,24 @@ void drm_sched_hw_job_reset(struct 
>> drm_gpu_scheduler *sched, struct drm_sched_jo
>>               s_job->s_fence->parent = NULL;
>>               atomic_dec(&sched->hw_rq_count);
>>           }
>> +        else {
>> +            /* TODO Is it get/put neccessey here ? */
>> +            dma_fence_get(&s_job->s_fence->finished);
>> +            list_add(&s_job->finish_node, &wait_list);
>> +        }
>>       }
>>       spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>   +    /*
>> +     * Verify all the signaled jobs in mirror list are removed from 
>> the ring
>> +     * We rely on the fact that any finish_work in progress will 
>> wait for this
>> +     * handler to complete before releasing all of the jobs we iterate.
>> +     */
>> +    list_for_each_entry(s_job, &wait_list, finish_node) {
>> +        dma_fence_wait(&s_job->s_fence->finished, false);
>> +        dma_fence_put(&s_job->s_fence->finished);
>> +    }
>> +
>
> Increasing a bad jobs karma should be a separate function.
>
> Christian.
>
>>       if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
>>           atomic_inc(&bad->karma);
>>           /* don't increase @bad's karma if it's from KERNEL RQ,
>> @@ -385,7 +406,7 @@ void drm_sched_hw_job_reset(struct 
>> drm_gpu_scheduler *sched, struct drm_sched_jo
>>           }
>>       }
>>   }
>> -EXPORT_SYMBOL(drm_sched_hw_job_reset);
>> +EXPORT_SYMBOL(drm_sched_stop);
>>     /**
>>    * drm_sched_job_recovery - recover jobs after a reset
>> @@ -393,33 +414,21 @@ EXPORT_SYMBOL(drm_sched_hw_job_reset);
>>    * @sched: scheduler instance
>>    *
>>    */
>> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
>> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)
>>   {
>>       struct drm_sched_job *s_job, *tmp;
>> -    bool found_guilty = false;
>>       unsigned long flags;
>>       int r;
>>   -    spin_lock_irqsave(&sched->job_list_lock, flags);
>> +    if (unpark_only)
>> +        goto unpark;
>> +
>> +    spin_lock_irqsave(&sched->job_list_lock, flags);
>>       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, 
>> node) {
>>           struct drm_sched_fence *s_fence = s_job->s_fence;
>> -        struct dma_fence *fence;
>> -        uint64_t guilty_context;
>> -
>> -        if (!found_guilty && atomic_read(&s_job->karma) > 
>> sched->hang_limit) {
>> -            found_guilty = true;
>> -            guilty_context = s_job->s_fence->scheduled.context;
>> -        }
>> -
>> -        if (found_guilty && s_job->s_fence->scheduled.context == 
>> guilty_context)
>> -            dma_fence_set_error(&s_fence->finished, -ECANCELED);
>> -
>> -        spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> -        fence = sched->ops->run_job(s_job);
>> -        atomic_inc(&sched->hw_rq_count);
>> +        struct dma_fence *fence = s_job->s_fence->parent;
>>             if (fence) {
>> -            s_fence->parent = dma_fence_get(fence);
>>               r = dma_fence_add_callback(fence, &s_fence->cb,
>>                              drm_sched_process_job);
>>               if (r == -ENOENT)
>> @@ -427,18 +436,47 @@ void drm_sched_job_recovery(struct 
>> drm_gpu_scheduler *sched)
>>               else if (r)
>>                   DRM_ERROR("fence add callback failed (%d)\n",
>>                         r);
>> -            dma_fence_put(fence);
>> -        } else {
>> -            if (s_fence->finished.error < 0)
>> -                drm_sched_expel_job_unlocked(s_job);
>> +        } else
>>               drm_sched_process_job(NULL, &s_fence->cb);
>> -        }
>> -        spin_lock_irqsave(&sched->job_list_lock, flags);
>>       }
>> +
>>       drm_sched_start_timeout(sched);
>>       spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> +
>> +unpark:
>> +    kthread_unpark(sched->thread);
>>   }
>> -EXPORT_SYMBOL(drm_sched_job_recovery);
>> +EXPORT_SYMBOL(drm_sched_start);
>> +
>> +/**
>> + * drm_sched_resubmit_jobs - helper to relunch job from mirror ring 
>> list
>> + *
>> + * @sched: scheduler instance
>> + *
>> + */
>> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
>> +{
>> +    struct drm_sched_job *s_job, *tmp;
>> +    uint64_t guilty_context;
>> +    bool found_guilty = false;
>> +
>> +    /*TODO DO we need spinlock here ? */
>> +    list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, 
>> node) {
>> +        struct drm_sched_fence *s_fence = s_job->s_fence;
>> +
>> +        if (!found_guilty && atomic_read(&s_job->karma) > 
>> sched->hang_limit) {
>> +            found_guilty = true;
>> +            guilty_context = s_job->s_fence->scheduled.context;
>> +        }
>> +
>> +        if (found_guilty && s_job->s_fence->scheduled.context == 
>> guilty_context)
>> +            dma_fence_set_error(&s_fence->finished, -ECANCELED);
>> +
>> +        s_job->s_fence->parent = sched->ops->run_job(s_job);
>> +        atomic_inc(&sched->hw_rq_count);
>> +    }
>> +}
>> +EXPORT_SYMBOL(drm_sched_resubmit_jobs);
>>     /**
>>    * drm_sched_job_init - init a scheduler job
>> @@ -634,26 +672,14 @@ static int drm_sched_main(void *param)
>>                   DRM_ERROR("fence add callback failed (%d)\n",
>>                         r);
>>               dma_fence_put(fence);
>> -        } else {
>> -            if (s_fence->finished.error < 0)
>> -                drm_sched_expel_job_unlocked(sched_job);
>> +        } else
>>               drm_sched_process_job(NULL, &s_fence->cb);
>> -        }
>>             wake_up(&sched->job_scheduled);
>>       }
>>       return 0;
>>   }
>>   -static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
>> -{
>> -    struct drm_gpu_scheduler *sched = s_job->sched;
>> -
>> -    spin_lock(&sched->job_list_lock);
>> -    list_del_init(&s_job->node);
>> -    spin_unlock(&sched->job_list_lock);
>> -}
>> -
>>   /**
>>    * drm_sched_init - Init a gpu scheduler instance
>>    *
>> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c 
>> b/drivers/gpu/drm/v3d/v3d_sched.c
>> index 445b2ef..f99346a 100644
>> --- a/drivers/gpu/drm/v3d/v3d_sched.c
>> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
>> @@ -178,18 +178,19 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
>>       for (q = 0; q < V3D_MAX_QUEUES; q++) {
>>           struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
>>   -        kthread_park(sched->thread);
>> -        drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
>> -                           sched_job : NULL));
>> +        drm_sched_stop(sched, (sched_job->sched == sched ?
>> +                           sched_job : NULL), false);
>>       }
>>         /* get the GPU back into the init state */
>>       v3d_reset(v3d);
>>   +    for (q = 0; q < V3D_MAX_QUEUES; q++)
>> +        drm_sched_resubmit_jobs(sched_job->sched);
>> +
>>       /* Unblock schedulers and restart their jobs. */
>>       for (q = 0; q < V3D_MAX_QUEUES; q++) {
>> -        drm_sched_job_recovery(&v3d->queue[q].sched);
>> -        kthread_unpark(v3d->queue[q].sched.thread);
>> +        drm_sched_start(&v3d->queue[q].sched, false);
>>       }
>>         mutex_unlock(&v3d->reset_lock);
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index 47e1979..c94b592 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -175,6 +175,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct 
>> dma_fence *f);
>>    *               finished to remove the job from the
>>    *               @drm_gpu_scheduler.ring_mirror_list.
>>    * @node: used to append this struct to the 
>> @drm_gpu_scheduler.ring_mirror_list.
>> + * @finish_node: used in a list to wait on before resetting the 
>> scheduler
>>    * @id: a unique id assigned to each job scheduled on the scheduler.
>>    * @karma: increment on every hang caused by this job. If this 
>> exceeds the hang
>>    *         limit of the scheduler then the job is marked guilty and 
>> will not
>> @@ -193,6 +194,7 @@ struct drm_sched_job {
>>       struct dma_fence_cb        finish_cb;
>>       struct work_struct        finish_work;
>>       struct list_head        node;
>> +    struct list_head        finish_node;
>>       uint64_t            id;
>>       atomic_t            karma;
>>       enum drm_sched_priority        s_priority;
>> @@ -298,9 +300,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
>>                  void *owner);
>>   void drm_sched_job_cleanup(struct drm_sched_job *job);
>>   void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
>> -                struct drm_sched_job *job);
>> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
>> +void drm_sched_stop(struct drm_gpu_scheduler *sched,
>> +            struct drm_sched_job *job,
>> +            bool park_only);
>> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool 
>> unpark_only);
>> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
>>   bool drm_sched_dependency_optimized(struct dma_fence* fence,
>>                       struct drm_sched_entity *entity);
>>   void drm_sched_fault(struct drm_gpu_scheduler *sched);
>

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling.
       [not found]       ` <975594c7-3f8f-60f1-77b2-98d23b1a3646-5C7GfCeVMHo@public.gmane.org>
@ 2018-12-17 18:16         ` Koenig, Christian
  0 siblings, 0 replies; 10+ messages in thread
From: Koenig, Christian @ 2018-12-17 18:16 UTC (permalink / raw)
  To: Grodzovsky, Andrey, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	etnaviv-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Zhou, David(ChunMing), Liu, Monk

Am 17.12.18 um 17:57 schrieb Grodzovsky, Andrey:
>
> On 12/17/2018 10:27 AM, Christian König wrote:
>> Am 10.12.18 um 22:43 schrieb Andrey Grodzovsky:
>>> Decouple sched threads stop and start and ring mirror
>>> list handling from the policy of what to do about the
>>> guilty jobs.
>>> When stopping the sched thread and detaching sched fences
>>> from non signaled HW fences wait for all signaled HW fences
>>> to complete before rerunning the jobs.
>>>
>>> v2: Fix resubmission of guilty job into HW after refactoring.
>>>
>>> Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  17 +++--
>>>    drivers/gpu/drm/etnaviv/etnaviv_sched.c    |   8 +--
>>>    drivers/gpu/drm/scheduler/sched_main.c     | 110
>>> ++++++++++++++++++-----------
>>>    drivers/gpu/drm/v3d/v3d_sched.c            |  11 +--
>>>    include/drm/gpu_scheduler.h                |  10 ++-
>>>    5 files changed, 95 insertions(+), 61 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index ef36cc5..42111d5 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -3292,17 +3292,16 @@ static int
>>> amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
>>>        /* block all schedulers and reset given job's ring */
>>>        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>>>            struct amdgpu_ring *ring = adev->rings[i];
>>> +        bool park_only = job && job->base.sched != &ring->sched;
>>>              if (!ring || !ring->sched.thread)
>>>                continue;
>>>    -        kthread_park(ring->sched.thread);
>>> +        drm_sched_stop(&ring->sched, job ? &job->base : NULL,
>>> park_only);
>>>    -        if (job && job->base.sched != &ring->sched)
>>> +        if (park_only)
>>>                continue;
>>>    -        drm_sched_hw_job_reset(&ring->sched, job ? &job->base :
>>> NULL);
>>> -
>>>            /* after all hw jobs are reset, hw fence is meaningless, so
>>> force_completion */
>>>            amdgpu_fence_driver_force_completion(ring);
>>>        }
>>> @@ -3445,6 +3444,7 @@ static void
>>> amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>>>                          struct amdgpu_job *job)
>>>    {
>>>        int i;
>>> +    bool unpark_only;
>>>          for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>>>            struct amdgpu_ring *ring = adev->rings[i];
>>> @@ -3456,10 +3456,13 @@ static void
>>> amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
>>>             * or all rings (in the case @job is NULL)
>>>             * after above amdgpu_reset accomplished
>>>             */
>>> -        if ((!job || job->base.sched == &ring->sched) &&
>>> !adev->asic_reset_res)
>>> -            drm_sched_job_recovery(&ring->sched);
>>> +        unpark_only = (job && job->base.sched != &ring->sched) ||
>>> +                   adev->asic_reset_res;
>>> +
>>> +        if (!unpark_only)
>>> +            drm_sched_resubmit_jobs(&ring->sched);
>>>    -        kthread_unpark(ring->sched.thread);
>>> +        drm_sched_start(&ring->sched, unpark_only);
>>>        }
>>>          if (!amdgpu_device_has_dc_support(adev)) {
>>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>>> b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>>> index 49a6763..fab3b51 100644
>>> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
>>> @@ -109,16 +109,16 @@ static void etnaviv_sched_timedout_job(struct
>>> drm_sched_job *sched_job)
>>>        }
>>>          /* block scheduler */
>>> -    kthread_park(gpu->sched.thread);
>>> -    drm_sched_hw_job_reset(&gpu->sched, sched_job);
>>> +    drm_sched_stop(&gpu->sched, sched_job, false);
>>>          /* get the GPU back into the init state */
>>>        etnaviv_core_dump(gpu);
>>>        etnaviv_gpu_recover_hang(gpu);
>>>    +    drm_sched_resubmit_jobs(&gpu->sched);
>>> +
>>>        /* restart scheduler after GPU is usable again */
>>> -    drm_sched_job_recovery(&gpu->sched);
>>> -    kthread_unpark(gpu->sched.thread);
>>> +    drm_sched_start(&gpu->sched);
>>>    }
>>>      static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c
>>> b/drivers/gpu/drm/scheduler/sched_main.c
>>> index dbb6906..cdf95e2 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -60,8 +60,6 @@
>>>      static void drm_sched_process_job(struct dma_fence *f, struct
>>> dma_fence_cb *cb);
>>>    -static void drm_sched_expel_job_unlocked(struct drm_sched_job
>>> *s_job);
>>> -
>>>    /**
>>>     * drm_sched_rq_init - initialize a given run queue struct
>>>     *
>>> @@ -342,13 +340,21 @@ static void drm_sched_job_timedout(struct
>>> work_struct *work)
>>>     * @bad: bad scheduler job
>>>     *
>>>     */
>>> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct
>>> drm_sched_job *bad)
>>> +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct
>>> drm_sched_job *bad,
>>> +            bool park_only)
>>>    {
>>>        struct drm_sched_job *s_job;
>>>        struct drm_sched_entity *entity, *tmp;
>>>        unsigned long flags;
>>> +    struct list_head wait_list;
>>>        int i;
>>>    +    kthread_park(sched->thread);
>>> +    if (park_only)
>>> +        return;
>> Removing the callback needs to be done for all engines, not just the
>> one where the bad job is on.
> Is it because you assume that during full GPU reset we might kill
> healthy jobs that were still in progress in the HW pipe, and so we need to
> manually recover them
> just as we do with the guilty job's engine ?

Yes, exactly.

Christian.

>
> Andrey
>
>>> +
>>> +    INIT_LIST_HEAD(&wait_list);
>>> +
>>>        spin_lock_irqsave(&sched->job_list_lock, flags);
>>>        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list,
>>> node) {
>>>            if (s_job->s_fence->parent &&
>>> @@ -358,9 +364,24 @@ void drm_sched_hw_job_reset(struct
>>> drm_gpu_scheduler *sched, struct drm_sched_jo
>>>                s_job->s_fence->parent = NULL;
>>>                atomic_dec(&sched->hw_rq_count);
>>>            }
>>> +        else {
>>> +            /* TODO Is it get/put neccessey here ? */
>>> +            dma_fence_get(&s_job->s_fence->finished);
>>> +            list_add(&s_job->finish_node, &wait_list);
>>> +        }
>>>        }
>>>        spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>>    +    /*
>>> +     * Verify all the signaled jobs in mirror list are removed from
>>> the ring
>>> +     * We rely on the fact that any finish_work in progress will
>>> wait for this
>>> +     * handler to complete before releasing all of the jobs we iterate.
>>> +     */
>>> +    list_for_each_entry(s_job, &wait_list, finish_node) {
>>> +        dma_fence_wait(&s_job->s_fence->finished, false);
>>> +        dma_fence_put(&s_job->s_fence->finished);
>>> +    }
>>> +
>> Increasing a bad jobs karma should be a separate function.
>>
>> Christian.
>>
>>>        if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
>>>            atomic_inc(&bad->karma);
>>>            /* don't increase @bad's karma if it's from KERNEL RQ,
>>> @@ -385,7 +406,7 @@ void drm_sched_hw_job_reset(struct
>>> drm_gpu_scheduler *sched, struct drm_sched_jo
>>>            }
>>>        }
>>>    }
>>> -EXPORT_SYMBOL(drm_sched_hw_job_reset);
>>> +EXPORT_SYMBOL(drm_sched_stop);
>>>      /**
>>>     * drm_sched_job_recovery - recover jobs after a reset
>>> @@ -393,33 +414,21 @@ EXPORT_SYMBOL(drm_sched_hw_job_reset);
>>>     * @sched: scheduler instance
>>>     *
>>>     */
>>> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
>>> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool unpark_only)
>>>    {
>>>        struct drm_sched_job *s_job, *tmp;
>>> -    bool found_guilty = false;
>>>        unsigned long flags;
>>>        int r;
>>>    -    spin_lock_irqsave(&sched->job_list_lock, flags);
>>> +    if (unpark_only)
>>> +        goto unpark;
>>> +
>>> +    spin_lock_irqsave(&sched->job_list_lock, flags);
>>>        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>>> node) {
>>>            struct drm_sched_fence *s_fence = s_job->s_fence;
>>> -        struct dma_fence *fence;
>>> -        uint64_t guilty_context;
>>> -
>>> -        if (!found_guilty && atomic_read(&s_job->karma) >
>>> sched->hang_limit) {
>>> -            found_guilty = true;
>>> -            guilty_context = s_job->s_fence->scheduled.context;
>>> -        }
>>> -
>>> -        if (found_guilty && s_job->s_fence->scheduled.context ==
>>> guilty_context)
>>> -            dma_fence_set_error(&s_fence->finished, -ECANCELED);
>>> -
>>> -        spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>> -        fence = sched->ops->run_job(s_job);
>>> -        atomic_inc(&sched->hw_rq_count);
>>> +        struct dma_fence *fence = s_job->s_fence->parent;
>>>              if (fence) {
>>> -            s_fence->parent = dma_fence_get(fence);
>>>                r = dma_fence_add_callback(fence, &s_fence->cb,
>>>                               drm_sched_process_job);
>>>                if (r == -ENOENT)
>>> @@ -427,18 +436,47 @@ void drm_sched_job_recovery(struct
>>> drm_gpu_scheduler *sched)
>>>                else if (r)
>>>                    DRM_ERROR("fence add callback failed (%d)\n",
>>>                          r);
>>> -            dma_fence_put(fence);
>>> -        } else {
>>> -            if (s_fence->finished.error < 0)
>>> -                drm_sched_expel_job_unlocked(s_job);
>>> +        } else
>>>                drm_sched_process_job(NULL, &s_fence->cb);
>>> -        }
>>> -        spin_lock_irqsave(&sched->job_list_lock, flags);
>>>        }
>>> +
>>>        drm_sched_start_timeout(sched);
>>>        spin_unlock_irqrestore(&sched->job_list_lock, flags);
>>> +
>>> +unpark:
>>> +    kthread_unpark(sched->thread);
>>>    }
>>> -EXPORT_SYMBOL(drm_sched_job_recovery);
>>> +EXPORT_SYMBOL(drm_sched_start);
>>> +
>>> +/**
>>> + * drm_sched_resubmit_jobs - helper to relunch job from mirror ring
>>> list
>>> + *
>>> + * @sched: scheduler instance
>>> + *
>>> + */
>>> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
>>> +{
>>> +    struct drm_sched_job *s_job, *tmp;
>>> +    uint64_t guilty_context;
>>> +    bool found_guilty = false;
>>> +
>>> +    /*TODO DO we need spinlock here ? */
>>> +    list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list,
>>> node) {
>>> +        struct drm_sched_fence *s_fence = s_job->s_fence;
>>> +
>>> +        if (!found_guilty && atomic_read(&s_job->karma) >
>>> sched->hang_limit) {
>>> +            found_guilty = true;
>>> +            guilty_context = s_job->s_fence->scheduled.context;
>>> +        }
>>> +
>>> +        if (found_guilty && s_job->s_fence->scheduled.context ==
>>> guilty_context)
>>> +            dma_fence_set_error(&s_fence->finished, -ECANCELED);
>>> +
>>> +        s_job->s_fence->parent = sched->ops->run_job(s_job);
>>> +        atomic_inc(&sched->hw_rq_count);
>>> +    }
>>> +}
>>> +EXPORT_SYMBOL(drm_sched_resubmit_jobs);
>>>      /**
>>>     * drm_sched_job_init - init a scheduler job
>>> @@ -634,26 +672,14 @@ static int drm_sched_main(void *param)
>>>                    DRM_ERROR("fence add callback failed (%d)\n",
>>>                          r);
>>>                dma_fence_put(fence);
>>> -        } else {
>>> -            if (s_fence->finished.error < 0)
>>> -                drm_sched_expel_job_unlocked(sched_job);
>>> +        } else
>>>                drm_sched_process_job(NULL, &s_fence->cb);
>>> -        }
>>>              wake_up(&sched->job_scheduled);
>>>        }
>>>        return 0;
>>>    }
>>>    -static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
>>> -{
>>> -    struct drm_gpu_scheduler *sched = s_job->sched;
>>> -
>>> -    spin_lock(&sched->job_list_lock);
>>> -    list_del_init(&s_job->node);
>>> -    spin_unlock(&sched->job_list_lock);
>>> -}
>>> -
>>>    /**
>>>     * drm_sched_init - Init a gpu scheduler instance
>>>     *
>>> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c
>>> b/drivers/gpu/drm/v3d/v3d_sched.c
>>> index 445b2ef..f99346a 100644
>>> --- a/drivers/gpu/drm/v3d/v3d_sched.c
>>> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
>>> @@ -178,18 +178,19 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
>>>        for (q = 0; q < V3D_MAX_QUEUES; q++) {
>>>            struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
>>>    -        kthread_park(sched->thread);
>>> -        drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
>>> -                           sched_job : NULL));
>>> +        drm_sched_stop(sched, (sched_job->sched == sched ?
>>> +                           sched_job : NULL), false);
>>>        }
>>>          /* get the GPU back into the init state */
>>>        v3d_reset(v3d);
>>>    +    for (q = 0; q < V3D_MAX_QUEUES; q++)
>>> +        drm_sched_resubmit_jobs(sched_job->sched);
>>> +
>>>        /* Unblock schedulers and restart their jobs. */
>>>        for (q = 0; q < V3D_MAX_QUEUES; q++) {
>>> -        drm_sched_job_recovery(&v3d->queue[q].sched);
>>> -        kthread_unpark(v3d->queue[q].sched.thread);
>>> +        drm_sched_start(&v3d->queue[q].sched, false);
>>>        }
>>>          mutex_unlock(&v3d->reset_lock);
>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>> index 47e1979..c94b592 100644
>>> --- a/include/drm/gpu_scheduler.h
>>> +++ b/include/drm/gpu_scheduler.h
>>> @@ -175,6 +175,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct
>>> dma_fence *f);
>>>     *               finished to remove the job from the
>>>     *               @drm_gpu_scheduler.ring_mirror_list.
>>>     * @node: used to append this struct to the
>>> @drm_gpu_scheduler.ring_mirror_list.
>>> + * @finish_node: used in a list to wait on before resetting the
>>> scheduler
>>>     * @id: a unique id assigned to each job scheduled on the scheduler.
>>>     * @karma: increment on every hang caused by this job. If this
>>> exceeds the hang
>>>     *         limit of the scheduler then the job is marked guilty and
>>> will not
>>> @@ -193,6 +194,7 @@ struct drm_sched_job {
>>>        struct dma_fence_cb        finish_cb;
>>>        struct work_struct        finish_work;
>>>        struct list_head        node;
>>> +    struct list_head        finish_node;
>>>        uint64_t            id;
>>>        atomic_t            karma;
>>>        enum drm_sched_priority        s_priority;
>>> @@ -298,9 +300,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
>>>                   void *owner);
>>>    void drm_sched_job_cleanup(struct drm_sched_job *job);
>>>    void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>>> -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
>>> -                struct drm_sched_job *job);
>>> -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
>>> +void drm_sched_stop(struct drm_gpu_scheduler *sched,
>>> +            struct drm_sched_job *job,
>>> +            bool park_only);
>>> +void drm_sched_start(struct drm_gpu_scheduler *sched, bool
>>> unpark_only);
>>> +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
>>>    bool drm_sched_dependency_optimized(struct dma_fence* fence,
>>>                        struct drm_sched_entity *entity);
>>>    void drm_sched_fault(struct drm_gpu_scheduler *sched);

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2018-12-17 18:16 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-10 21:43 [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling Andrey Grodzovsky
     [not found] ` <1544478238-13310-1-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
2018-12-10 21:43   ` [PATCH v3 2/2] drm/sched: Rework HW fence processing Andrey Grodzovsky
     [not found]     ` <1544478238-13310-2-git-send-email-andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>
2018-12-11  2:45       ` Zhou, David(ChunMing)
     [not found]         ` <SN1PR12MB05109A9D297261FC186B2627B4A60-z7L1TMIYDg5tVDmkcP8tDwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-12-11 16:01           ` Grodzovsky, Andrey
     [not found]             ` <28c3bebc-9661-198c-7ea9-e59260d658c9-5C7GfCeVMHo@public.gmane.org>
2018-12-11 16:20               ` Christian König
     [not found]                 ` <9a645f9c-93d1-12cc-044b-4bc1ea84d7d9-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-12-12 13:08                   ` Grodzovsky, Andrey
     [not found]                     ` <28bb2244-c2bf-3a3f-18e4-df9aef45cd58-5C7GfCeVMHo@public.gmane.org>
2018-12-14 15:14                       ` Grodzovsky, Andrey
2018-12-17 15:27   ` [PATCH v3 1/2] drm/sched: Refactor ring mirror list handling Christian König
2018-12-17 16:57     ` Grodzovsky, Andrey
     [not found]       ` <975594c7-3f8f-60f1-77b2-98d23b1a3646-5C7GfCeVMHo@public.gmane.org>
2018-12-17 18:16         ` Koenig, Christian

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).