* [PATCH] drm/scheduler: Add drm_sched_job_cleanup
@ 2018-10-26 11:06 Sharat Masetty
       [not found] ` <1540551965-22215-1-git-send-email-smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  0 siblings, 1 reply; 2+ messages in thread
From: Sharat Masetty @ 2018-10-26 11:06 UTC (permalink / raw)
  To: freedreno@lists.freedesktop.org
  Cc: Sharat Masetty, linux-arm-msm@vger.kernel.org,
	jcrouse@codeaurora.org, Christian.Koenig@amd.com,
	dri-devel@lists.freedesktop.org

This patch adds a new API to clean up the scheduler job resources. This
is primarily needed in cases where the job was created but never queued
to the scheduler. Additionally, with this change the layer which creates
the scheduler job is now also responsible for freeing the job's
resources; this entails moving the dma_fence_put(finished_fence) call
into the drivers' free_job ops handlers.

Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
---
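A note for reviewers: below is a minimal sketch of the intended call
pattern in a driver submit path. The foo_* names are hypothetical;
drm_sched_job_init(), drm_sched_entity_push_job() and the new
drm_sched_job_cleanup() are the only scheduler API calls used, and the
etnaviv hunk further down follows this same error-path pattern.

/* Hypothetical driver job embedding the scheduler job. */
struct foo_job {
	struct drm_sched_job base;
	/* driver specific state ... */
};

static int foo_submit(struct foo_job *job,
		      struct drm_sched_entity *entity, void *owner)
{
	int ret;

	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	ret = foo_prepare(job);	/* hypothetical driver setup step */
	if (ret) {
		/*
		 * The job was initialized but never queued, so release
		 * the scheduler fence here instead of leaking it.
		 */
		drm_sched_job_cleanup(&job->base);
		return ret;
	}

	/*
	 * Once the job is pushed, the driver's free_job() callback is
	 * expected to call drm_sched_job_cleanup() when the job retires.
	 */
	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}
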
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 ++
 drivers/gpu/drm/etnaviv/etnaviv_sched.c  |  3 +++
 drivers/gpu/drm/scheduler/sched_entity.c |  1 -
 drivers/gpu/drm/scheduler/sched_main.c   | 12 +++++++++++-
 drivers/gpu/drm/v3d/v3d_sched.c          |  2 ++
 include/drm/gpu_scheduler.h              |  1 +
 7 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c..5d768f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_abort:
-	dma_fence_put(&job->base.s_fence->finished);
-	job->base.s_fence = NULL;
+	drm_sched_job_cleanup(&job->base);
 	amdgpu_mn_unlock(p->mn);
 
 error_unlock:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733..e0af44f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
+	drm_sched_job_cleanup(s_job);
+
 	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index e7c3ed6..6f3c9bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -127,6 +127,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	etnaviv_submit_put(submit);
 }
 
@@ -159,6 +161,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
 	if (submit->out_fence_id < 0) {
+		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54..8ff9d21f 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -204,7 +204,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 	drm_sched_fence_finished(job->s_fence);
 	WARN_ON(job->s_fence->parent);
-	dma_fence_put(&job->s_fence->finished);
 	job->sched->ops->free_job(job);
 }
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587..147af89 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -220,7 +220,6 @@ static void drm_sched_job_finish(struct work_struct *work)
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
-	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
 
@@ -424,6 +423,17 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
+/**
  * drm_sched_ready - is the scheduler ready
  *
  * @sched: scheduler instance
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9243dea..4ecd45e 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -35,6 +35,8 @@
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	v3d_exec_put(job->exec);
 }
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268..41136c4a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -293,6 +293,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_job *job);
-- 
1.9.1


* Re: [PATCH] drm/scheduler: Add drm_sched_job_cleanup
       [not found] ` <1540551965-22215-1-git-send-email-smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
@ 2018-10-26 11:11   ` Christian König
  0 siblings, 0 replies; 2+ messages in thread
From: Christian König @ 2018-10-26 11:11 UTC (permalink / raw)
  To: Sharat Masetty, freedreno@lists.freedesktop.org
  Cc: linux-arm-msm@vger.kernel.org,
	Christian.Koenig@amd.com,
	dri-devel@lists.freedesktop.org

Am 26.10.18 um 13:06 schrieb Sharat Masetty:
> This patch adds a new API to clean up the scheduler job resources. This
> is primarily needed in cases where the job was created but never queued
> to the scheduler. Additionally, with this change the layer which creates
> the scheduler job is now also responsible for freeing the job's
> resources; this entails moving the dma_fence_put(finished_fence) call
> into the drivers' free_job ops handlers.
>
> Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   |  3 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 ++
>   drivers/gpu/drm/etnaviv/etnaviv_sched.c  |  3 +++
>   drivers/gpu/drm/scheduler/sched_entity.c |  1 -
>   drivers/gpu/drm/scheduler/sched_main.c   | 12 +++++++++++-
>   drivers/gpu/drm/v3d/v3d_sched.c          |  2 ++
>   include/drm/gpu_scheduler.h              |  1 +
>   7 files changed, 20 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 663043c..5d768f9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>   	return 0;
>   
>   error_abort:
> -	dma_fence_put(&job->base.s_fence->finished);
> -	job->base.s_fence = NULL;
> +	drm_sched_job_cleanup(&job->base);
>   	amdgpu_mn_unlock(p->mn);
>   
>   error_unlock:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 755f733..e0af44f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
>   	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
>   	struct amdgpu_job *job = to_amdgpu_job(s_job);
>   
> +	drm_sched_job_cleanup(s_job);
> +
>   	amdgpu_ring_priority_put(ring, s_job->s_priority);
>   	dma_fence_put(job->fence);
>   	amdgpu_sync_free(&job->sync);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index e7c3ed6..6f3c9bf 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -127,6 +127,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
>   {
>   	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
>   
> +	drm_sched_job_cleanup(sched_job);
> +
>   	etnaviv_submit_put(submit);
>   }
>   
> @@ -159,6 +161,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
>   						submit->out_fence, 0,
>   						INT_MAX, GFP_KERNEL);
>   	if (submit->out_fence_id < 0) {
> +		drm_sched_job_cleanup(&submit->sched_job);
>   		ret = -ENOMEM;
>   		goto out_unlock;
>   	}
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 3e22a54..8ff9d21f 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -204,7 +204,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>   
>   	drm_sched_fence_finished(job->s_fence);
>   	WARN_ON(job->s_fence->parent);
> -	dma_fence_put(&job->s_fence->finished);
>   	job->sched->ops->free_job(job);
>   }
>   
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 44fe587..147af89 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -220,7 +220,6 @@ static void drm_sched_job_finish(struct work_struct *work)
>   	drm_sched_start_timeout(sched);
>   	spin_unlock(&sched->job_list_lock);
>   
> -	dma_fence_put(&s_job->s_fence->finished);
>   	sched->ops->free_job(s_job);
>   }
>   
> @@ -424,6 +423,17 @@ int drm_sched_job_init(struct drm_sched_job *job,
>   EXPORT_SYMBOL(drm_sched_job_init);
>   
>   /**
> + * drm_sched_job_cleanup - clean up scheduler job resources
> + *
> + * @job: scheduler job to clean up
> + */
> +void drm_sched_job_cleanup(struct drm_sched_job *job)
> +{
> +	dma_fence_put(&job->s_fence->finished);

Please set job->s_fence to NULL here, otherwise we could try to free it
again in some code paths.
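
I.e. something along these lines (untested sketch):

void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	/* Clear the pointer so a later free path can't put the same
	 * reference twice. */
	job->s_fence = NULL;
}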

Apart from that looks good to me,
Christian.

> +}
> +EXPORT_SYMBOL(drm_sched_job_cleanup);
> +
> +/**
>    * drm_sched_ready - is the scheduler ready
>    *
>    * @sched: scheduler instance
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 9243dea..4ecd45e 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -35,6 +35,8 @@
>   {
>   	struct v3d_job *job = to_v3d_job(sched_job);
>   
> +	drm_sched_job_cleanup(sched_job);
> +
>   	v3d_exec_put(job->exec);
>   }
>   
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index d87b268..41136c4a 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -293,6 +293,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>   int drm_sched_job_init(struct drm_sched_job *job,
>   		       struct drm_sched_entity *entity,
>   		       void *owner);
> +void drm_sched_job_cleanup(struct drm_sched_job *job);
>   void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>   void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
>   			    struct drm_sched_job *job);

