From: Lucas Stach <l.stach@pengutronix.de>
To: Eric Anholt <eric@anholt.net>, dri-devel@lists.freedesktop.org
Cc: linux-kernel@vger.kernel.org, amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/3 v2] drm/v3d: Take a lock across GPU scheduler job creation and queuing.
Date: Thu, 07 Jun 2018 10:37:34 +0200	[thread overview]
Message-ID: <1528360654.15027.2.camel@pengutronix.de> (raw)
In-Reply-To: <20180606174851.12433-1-eric@anholt.net>

On Wed, 2018-06-06 at 10:48 -0700, Eric Anholt wrote:
> Between creation and queueing of a job, you need to prevent any other
> job from being created and queued.  Otherwise the scheduler's fences
> may be signaled out of seqno order.
> 
> v2: move mutex unlock to the error label.
> 
> Signed-off-by: Eric Anholt <eric@anholt.net>
> Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")

Reviewed-by: Lucas Stach <l.stach@pengutronix.de>

> ---
>  drivers/gpu/drm/v3d/v3d_drv.h | 5 +++++
>  drivers/gpu/drm/v3d/v3d_gem.c | 4 ++++
>  2 files changed, 9 insertions(+)
> 
> diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
> index a043ac3aae98..26005abd9c5d 100644
> --- a/drivers/gpu/drm/v3d/v3d_drv.h
> +++ b/drivers/gpu/drm/v3d/v3d_drv.h
> @@ -85,6 +85,11 @@ struct v3d_dev {
>  	 */
>  	struct mutex reset_lock;
>  
> +	/* Lock taken when creating and pushing the GPU scheduler
> +	 * jobs, to keep the sched-fence seqnos in order.
> +	 */
> +	struct mutex sched_lock;
> +
>  	struct {
>  		u32 num_allocated;
>  		u32 pages_allocated;
> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> index b513f9189caf..269fe16379c0 100644
> --- a/drivers/gpu/drm/v3d/v3d_gem.c
> +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> @@ -550,6 +550,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	if (ret)
>  		goto fail;
>  
> +	mutex_lock(&v3d->sched_lock);
>  	if (exec->bin.start != exec->bin.end) {
>  		ret = drm_sched_job_init(&exec->bin.base,
>  					 &v3d->queue[V3D_BIN].sched,
> @@ -576,6 +577,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	kref_get(&exec->refcount); /* put by scheduler job completion */
>  	drm_sched_entity_push_job(&exec->render.base,
>  				  &v3d_priv->sched_entity[V3D_RENDER]);
> +	mutex_unlock(&v3d->sched_lock);
>  
>  	v3d_attach_object_fences(exec);
>  
> @@ -594,6 +596,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	return 0;
>  
>  fail_unreserve:
> +	mutex_unlock(&v3d->sched_lock);
>  	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
>  fail:
>  	v3d_exec_put(exec);
> @@ -615,6 +618,7 @@ v3d_gem_init(struct drm_device *dev)
>  	spin_lock_init(&v3d->job_lock);
>  	mutex_init(&v3d->bo_lock);
>  	mutex_init(&v3d->reset_lock);
> +	mutex_init(&v3d->sched_lock);
>  
>  	/* Note: We don't allocate address 0.  Various bits of HW
>  	 * treat 0 as special, such as the occlusion query counters
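
For anyone less familiar with the DRM scheduler, here is a minimal userspace sketch of the ordering problem the commit message describes. It is not v3d or scheduler code; submit_lock, next_seqno and publish_order are invented for illustration. Taking a seqno stands in for drm_sched_job_init() assigning the sched-fence seqno, publishing it stands in for drm_sched_entity_push_job(), and holding one mutex across both steps is what keeps publication in seqno order, the role v3d->sched_lock plays in the patch above.

/*
 * Userspace analogy of the race (NOT v3d code; all names are invented
 * for illustration).  Because submit_lock is held across seqno
 * allocation ("job init") and publication ("push job"), the published
 * order always matches seqno order.
 *
 * Build: cc -pthread -o seqno-demo seqno-demo.c
 */
#include <pthread.h>
#include <stdio.h>

#define JOBS_PER_THREAD 4
#define NUM_THREADS 2

static pthread_mutex_t submit_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_seqno;
static unsigned int publish_order[NUM_THREADS * JOBS_PER_THREAD];
static unsigned int published;

static void submit_one(void)
{
	pthread_mutex_lock(&submit_lock);
	/* "job init": the fence gets the next seqno */
	unsigned int seqno = ++next_seqno;
	/* "push job": the job becomes visible to the consumer */
	publish_order[published++] = seqno;
	pthread_mutex_unlock(&submit_lock);
}

static void *submitter(void *arg)
{
	(void)arg;
	for (int i = 0; i < JOBS_PER_THREAD; i++)
		submit_one();
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_THREADS];

	for (int i = 0; i < NUM_THREADS; i++)
		pthread_create(&threads[i], NULL, submitter, NULL);
	for (int i = 0; i < NUM_THREADS; i++)
		pthread_join(threads[i], NULL);

	/* With the lock held across both steps this always prints 1 2 3 ... */
	for (unsigned int i = 0; i < published; i++)
		printf("%u ", publish_order[i]);
	printf("\n");
	return 0;
}

Unlocking between the two steps lets the second thread publish a higher seqno first, which is the out-of-order fence signaling the patch prevents.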


Thread overview: 19+ messages
2018-06-05 19:03 [PATCH 1/3] drm/v3d: Take a lock across GPU scheduler job creation and queuing Eric Anholt
2018-06-05 19:03 ` Eric Anholt
2018-06-05 19:03 ` [PATCH 2/3] drm/v3d: Remove the bad signaled() implementation Eric Anholt
2018-06-05 19:03   ` Eric Anholt
2018-06-08 10:21   ` Lucas Stach
2018-06-08 10:21     ` Lucas Stach
2018-06-05 19:03 ` [PATCH 3/3] drm/v3d: Add a note about locking of v3d_fence_create() Eric Anholt
2018-06-05 19:03   ` Eric Anholt
2018-06-08 10:24   ` Lucas Stach
2018-06-08 10:24     ` Lucas Stach
2018-06-08 17:08     ` Eric Anholt
2018-06-08 17:08       ` Eric Anholt
2018-06-06  8:46 ` [PATCH 1/3] drm/v3d: Take a lock across GPU scheduler job creation and queuing Lucas Stach
2018-06-06  8:46   ` Lucas Stach
2018-06-06  8:52   ` Christian König
2018-06-06 17:48   ` [PATCH 1/3 v2] " Eric Anholt
2018-06-06 17:48     ` Eric Anholt
2018-06-07  8:37     ` Lucas Stach [this message]
2018-06-07  8:37       ` Lucas Stach

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1528360654.15027.2.camel@pengutronix.de \
    --to=l.stach@pengutronix.de \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=eric@anholt.net \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.