All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Christian König" <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
To: Sharat Masetty <smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	Christian.Koenig-5C7GfCeVMHo@public.gmane.org,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH] drm/scheduler: Add drm_sched_job_cleanup
Date: Fri, 26 Oct 2018 13:11:35 +0200	[thread overview]
Message-ID: <ad3a352b-a6eb-018b-df5b-3b4e5a6e7d60@gmail.com> (raw)
In-Reply-To: <1540551965-22215-1-git-send-email-smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>

Am 26.10.18 um 13:06 schrieb Sharat Masetty:
> This patch adds a new API to clean up the scheduler job resources. This
> is primarily needed in cases where the job was created but was not queued to
> the scheduler queue. Additionally with this change, the layer which
> creates the scheduler job also gets to free up the job's resources and
> this entails moving the dma_fence_put(finished_fence) to the drivers
> ops free handler routines.
>
> Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   |  3 +--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 ++
>   drivers/gpu/drm/etnaviv/etnaviv_sched.c  |  3 +++
>   drivers/gpu/drm/scheduler/sched_entity.c |  1 -
>   drivers/gpu/drm/scheduler/sched_main.c   | 12 +++++++++++-
>   drivers/gpu/drm/v3d/v3d_sched.c          |  2 ++
>   include/drm/gpu_scheduler.h              |  1 +
>   7 files changed, 20 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 663043c..5d768f9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>   	return 0;
>   
>   error_abort:
> -	dma_fence_put(&job->base.s_fence->finished);
> -	job->base.s_fence = NULL;
> +	drm_sched_job_cleanup(&job->base);
>   	amdgpu_mn_unlock(p->mn);
>   
>   error_unlock:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 755f733..e0af44f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
>   	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
>   	struct amdgpu_job *job = to_amdgpu_job(s_job);
>   
> +	drm_sched_job_cleanup(s_job);
> +
>   	amdgpu_ring_priority_put(ring, s_job->s_priority);
>   	dma_fence_put(job->fence);
>   	amdgpu_sync_free(&job->sync);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index e7c3ed6..6f3c9bf 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -127,6 +127,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
>   {
>   	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
>   
> +	drm_sched_job_cleanup(sched_job);
> +
>   	etnaviv_submit_put(submit);
>   }
>   
> @@ -159,6 +161,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
>   						submit->out_fence, 0,
>   						INT_MAX, GFP_KERNEL);
>   	if (submit->out_fence_id < 0) {
> +		drm_sched_job_cleanup(&submit->sched_job);
>   		ret = -ENOMEM;
>   		goto out_unlock;
>   	}
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 3e22a54..8ff9d21f 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -204,7 +204,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>   
>   	drm_sched_fence_finished(job->s_fence);
>   	WARN_ON(job->s_fence->parent);
> -	dma_fence_put(&job->s_fence->finished);
>   	job->sched->ops->free_job(job);
>   }
>   
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 44fe587..147af89 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -220,7 +220,6 @@ static void drm_sched_job_finish(struct work_struct *work)
>   	drm_sched_start_timeout(sched);
>   	spin_unlock(&sched->job_list_lock);
>   
> -	dma_fence_put(&s_job->s_fence->finished);
>   	sched->ops->free_job(s_job);
>   }
>   
> @@ -424,6 +423,17 @@ int drm_sched_job_init(struct drm_sched_job *job,
>   EXPORT_SYMBOL(drm_sched_job_init);
>   
>   /**
> + * drm_sched_job_cleanup - clean up scheduler job resources
> + *
> + * @job: scheduler job to clean up
> + */
> +void drm_sched_job_cleanup(struct drm_sched_job *job)
> +{
> +	dma_fence_put(&job->s_fence->finished);

Please set job->s_fence to NULL here, otherwise we could try to free 
it again in some code paths.

Apart from that looks good to me,
Christian.

> +}
> +EXPORT_SYMBOL(drm_sched_job_cleanup);
> +
> +/**
>    * drm_sched_ready - is the scheduler ready
>    *
>    * @sched: scheduler instance
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 9243dea..4ecd45e 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -35,6 +35,8 @@
>   {
>   	struct v3d_job *job = to_v3d_job(sched_job);
>   
> +	drm_sched_job_cleanup(sched_job);
> +
>   	v3d_exec_put(job->exec);
>   }
>   
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index d87b268..41136c4a 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -293,6 +293,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>   int drm_sched_job_init(struct drm_sched_job *job,
>   		       struct drm_sched_entity *entity,
>   		       void *owner);
> +void drm_sched_job_cleanup(struct drm_sched_job *job);
>   void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>   void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
>   			    struct drm_sched_job *job);

_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

      parent reply	other threads:[~2018-10-26 11:11 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-10-26 11:06 [PATCH] drm/scheduler: Add drm_sched_job_cleanup Sharat Masetty
     [not found] ` <1540551965-22215-1-git-send-email-smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-10-26 11:11   ` Christian König [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ad3a352b-a6eb-018b-df5b-3b4e5a6e7d60@gmail.com \
    --to=ckoenig.leichtzumerken-re5jqeeqqe8avxtiumwx3w@public.gmane.org \
    --cc=Christian.Koenig-5C7GfCeVMHo@public.gmane.org \
    --cc=dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=linux-arm-msm-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=smasetty-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.