From: Jordan Crouse <jcrouse@codeaurora.org>
To: Rob Clark <robdclark@gmail.com>
Cc: Rob Clark <robdclark@chromium.org>,
	David Airlie <airlied@linux.ie>,
	"open list:DRM DRIVER FOR MSM ADRENO GPU"
	<linux-arm-msm@vger.kernel.org>,
	open list <linux-kernel@vger.kernel.org>,
	dri-devel@lists.freedesktop.org, Sean Paul <sean@poorly.run>,
	"open list:DRM DRIVER FOR MSM ADRENO GPU"
	<freedreno@lists.freedesktop.org>
Subject: Re: [Freedreno] [PATCH 04/14] drm/msm: Add priv->mm_lock to protect active/inactive lists
Date: Mon, 5 Oct 2020 08:19:40 -0600	[thread overview]
Message-ID: <20201005141940.GC4204@jcrouse1-lnx.qualcomm.com> (raw)
In-Reply-To: <20201004192152.3298573-5-robdclark@gmail.com>

On Sun, Oct 04, 2020 at 12:21:36PM -0700, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
> 
> Rather than relying on the big dev->struct_mutex hammer, introduce a
> more specific lock for protecting the bo lists.

Most excellent.

Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
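
As an aside for anyone following the series, the shape of the change is
easiest to see side by side. The sketch below is illustrative only (the
two helper names are made up, not from the patch); it contrasts the old
pattern of serializing bo list updates on dev->struct_mutex with the
new, narrower priv->mm_lock:

	#include "msm_drv.h"
	#include "msm_gem.h"

	/* Before the series: bo list updates piggyback on the device-wide lock. */
	static void track_inactive_old(struct drm_device *dev,
				       struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	/* With this patch: only the list itself is serialized, on priv->mm_lock. */
	static void track_inactive_new(struct drm_device *dev,
				       struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		mutex_lock(&priv->mm_lock);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
	}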

> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
>  drivers/gpu/drm/msm/msm_debugfs.c      |  7 +++++++
>  drivers/gpu/drm/msm/msm_drv.c          |  1 +
>  drivers/gpu/drm/msm/msm_drv.h          | 13 +++++++++++-
>  drivers/gpu/drm/msm/msm_gem.c          | 28 +++++++++++++++-----------
>  drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 +++++++++++
>  drivers/gpu/drm/msm/msm_gpu.h          |  5 ++++-
>  6 files changed, 52 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
> index ee2e270f464c..64afbed89821 100644
> --- a/drivers/gpu/drm/msm/msm_debugfs.c
> +++ b/drivers/gpu/drm/msm/msm_debugfs.c
> @@ -112,6 +112,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
>  {
>  	struct msm_drm_private *priv = dev->dev_private;
>  	struct msm_gpu *gpu = priv->gpu;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&priv->mm_lock);
> +	if (ret)
> +		return ret;
>  
>  	if (gpu) {
>  		seq_printf(m, "Active Objects (%s):\n", gpu->name);
> @@ -121,6 +126,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
>  	seq_printf(m, "Inactive Objects:\n");
>  	msm_gem_describe_objects(&priv->inactive_list, m);
>  
> +	mutex_unlock(&priv->mm_lock);
> +
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 49685571dc0e..dc6efc089285 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -441,6 +441,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	init_llist_head(&priv->free_list);
>  
>  	INIT_LIST_HEAD(&priv->inactive_list);
> +	mutex_init(&priv->mm_lock);
>  
>  	drm_mode_config_init(ddev);
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index b9dd8f8f4887..50978e5db376 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -174,8 +174,19 @@ struct msm_drm_private {
>  	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
>  	struct msm_perf_state *perf;
>  
> -	/* list of GEM objects: */
> +	/*
> +	 * List of inactive GEM objects.  Every bo is either in the inactive_list
> +	 * or gpu->active_list (for the gpu it is active on[1])
> +	 *
> +	 * These lists are protected by mm_lock.  If struct_mutex is involved, it
> +	 * should be acquired prior to mm_lock.  One should *not* hold mm_lock in
> +	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
> +	 *
> +	 * [1] if someone ever added support for the old 2d cores, there could be
> +	 *     more than one gpu object
> +	 */
>  	struct list_head inactive_list;
> +	struct mutex mm_lock;
>  
>  	/* worker for delayed free of objects: */
>  	struct work_struct free_work;
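
A quick restatement of the ordering rule in that comment, since it is
the thing lockdep will eventually care about: when struct_mutex is
involved at all it is the outer lock and mm_lock the inner one, and
mm_lock must be released before calling into anything that can allocate
(get_pages()/vmap() and friends), because an allocation can recurse
into the shrinker, which takes mm_lock later in this patch. A minimal
sketch against the same driver headers as above, with a made-up
function name:

	static void move_to_inactive_sketch(struct drm_device *dev,
					    struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);	/* outer lock, only if involved at all */
		mutex_lock(&priv->mm_lock);	/* inner lock, list surgery only */

		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

		/*
		 * mm_lock is dropped before get_pages()/vmap()/etc., which can
		 * allocate and so recurse into the shrinker.
		 */
		mutex_unlock(&priv->mm_lock);
		mutex_unlock(&dev->struct_mutex);
	}
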
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index a870b3ad129d..b04ed8b52f9d 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -746,13 +746,17 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>  void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
>  {
>  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> -	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> +	struct msm_drm_private *priv = obj->dev->dev_private;
> +
> +	might_sleep();
>  	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
>  
>  	if (!atomic_fetch_inc(&msm_obj->active_count)) {
> +		mutex_lock(&priv->mm_lock);
>  		msm_obj->gpu = gpu;
>  		list_del_init(&msm_obj->mm_list);
>  		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
> +		mutex_unlock(&priv->mm_lock);
>  	}
>  }
>  
> @@ -761,12 +765,14 @@ void msm_gem_active_put(struct drm_gem_object *obj)
>  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
>  	struct msm_drm_private *priv = obj->dev->dev_private;
>  
> -	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> +	might_sleep();
>  
>  	if (!atomic_dec_return(&msm_obj->active_count)) {
> +		mutex_lock(&priv->mm_lock);
>  		msm_obj->gpu = NULL;
>  		list_del_init(&msm_obj->mm_list);
>  		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
> +		mutex_unlock(&priv->mm_lock);
>  	}
>  }
>  
> @@ -921,13 +927,16 @@ static void free_object(struct msm_gem_object *msm_obj)
>  {
>  	struct drm_gem_object *obj = &msm_obj->base;
>  	struct drm_device *dev = obj->dev;
> +	struct msm_drm_private *priv = dev->dev_private;
>  
>  	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>  
>  	/* object should not be on active list: */
>  	WARN_ON(is_active(msm_obj));
>  
> +	mutex_lock(&priv->mm_lock);
>  	list_del(&msm_obj->mm_list);
> +	mutex_unlock(&priv->mm_lock);
>  
>  	mutex_lock(&msm_obj->lock);
>  
> @@ -1103,14 +1112,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
>  		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
>  	}
>  
> -	if (struct_mutex_locked) {
> -		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> -		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
> -	} else {
> -		mutex_lock(&dev->struct_mutex);
> -		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
> -		mutex_unlock(&dev->struct_mutex);
> -	}
> +	mutex_lock(&priv->mm_lock);
> +	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
> +	mutex_unlock(&priv->mm_lock);
>  
>  	return obj;
>  
> @@ -1178,9 +1182,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
>  
>  	mutex_unlock(&msm_obj->lock);
>  
> -	mutex_lock(&dev->struct_mutex);
> +	mutex_lock(&priv->mm_lock);
>  	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
> -	mutex_unlock(&dev->struct_mutex);
> +	mutex_unlock(&priv->mm_lock);
>  
>  	return obj;
>  
> diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
> index 482576d7a39a..c41b84a3a484 100644
> --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
> +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
> @@ -51,11 +51,15 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
>  	if (!msm_gem_shrinker_lock(dev, &unlock))
>  		return 0;
>  
> +	mutex_lock(&priv->mm_lock);
> +
>  	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
>  		if (is_purgeable(msm_obj))
>  			count += msm_obj->base.size >> PAGE_SHIFT;
>  	}
>  
> +	mutex_unlock(&priv->mm_lock);
> +
>  	if (unlock)
>  		mutex_unlock(&dev->struct_mutex);
>  
> @@ -75,6 +79,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
>  	if (!msm_gem_shrinker_lock(dev, &unlock))
>  		return SHRINK_STOP;
>  
> +	mutex_lock(&priv->mm_lock);
> +
>  	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
>  		if (freed >= sc->nr_to_scan)
>  			break;
> @@ -84,6 +90,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
>  		}
>  	}
>  
> +	mutex_unlock(&priv->mm_lock);
> +
>  	if (unlock)
>  		mutex_unlock(&dev->struct_mutex);
>  
> @@ -106,6 +114,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
>  	if (!msm_gem_shrinker_lock(dev, &unlock))
>  		return NOTIFY_DONE;
>  
> +	mutex_lock(&priv->mm_lock);
> +
>  	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
>  		if (is_vunmapable(msm_obj)) {
>  			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
> @@ -118,6 +128,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
>  		}
>  	}
>  
> +	mutex_unlock(&priv->mm_lock);
> +
>  	if (unlock)
>  		mutex_unlock(&dev->struct_mutex);
>  
> diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
> index 6c9e1fdc1a76..1806e87600c0 100644
> --- a/drivers/gpu/drm/msm/msm_gpu.h
> +++ b/drivers/gpu/drm/msm/msm_gpu.h
> @@ -94,7 +94,10 @@ struct msm_gpu {
>  	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
>  	int nr_rings;
>  
> -	/* list of GEM active objects: */
> +	/*
> +	 * List of GEM active objects on this gpu.  Protected by
> +	 * msm_drm_private::mm_lock
> +	 */
>  	struct list_head active_list;
>  
>  	/* does gpu need hw_init? */
> -- 
> 2.26.2
> 

-- 
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project


Thread overview: 39+ messages
2020-10-04 19:21 [PATCH 00/14] drm/msm: de-struct_mutex-ification Rob Clark
2020-10-04 19:21 ` [PATCH 01/14] drm/msm: Use correct drm_gem_object_put() in fail case Rob Clark
2020-10-04 19:21 ` [PATCH 02/14] drm/msm: Drop chatty trace Rob Clark
2020-10-05 14:15   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 03/14] drm/msm: Move update_fences() Rob Clark
2020-10-05 14:16   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 04/14] drm/msm: Add priv->mm_lock to protect active/inactive lists Rob Clark
2020-10-04 22:15   ` Daniel Vetter
2020-10-05  0:10     ` Rob Clark
2020-10-05 14:19   ` Jordan Crouse [this message]
2020-10-04 19:21 ` [PATCH 05/14] drm/msm: Document and rename preempt_lock Rob Clark
2020-10-05 14:22   ` Jordan Crouse
2020-10-04 19:21 ` [PATCH 06/14] drm/msm: Protect ring->submits with it's own lock Rob Clark
2020-10-05 14:23   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 07/14] drm/msm: Refcount submits Rob Clark
2020-10-05 13:56   ` Daniel Vetter
2020-10-05 16:24     ` Rob Clark
2020-10-05 14:27   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 08/14] drm/msm: Remove obj->gpu Rob Clark
2020-10-05 14:28   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 09/14] drm/msm: Drop struct_mutex from the retire path Rob Clark
2020-10-05 14:29   ` [Freedreno] " Jordan Crouse
2020-10-04 19:21 ` [PATCH 10/14] drm/msm: Drop struct_mutex in free_object() path Rob Clark
2020-10-04 19:21 ` [PATCH 11/14] drm/msm: remove msm_gem_free_work Rob Clark
2020-10-04 19:21 ` [PATCH 12/14] drm/msm: drop struct_mutex in madvise path Rob Clark
2020-10-04 19:21 ` [PATCH 13/14] drm/msm: Drop struct_mutex in shrinker path Rob Clark
2020-10-05  9:24   ` Hillf Danton
2020-10-05 14:02     ` Daniel Vetter
2020-10-05 16:17       ` Kristian Høgsberg
2020-10-06  0:44         ` Hillf Danton
2020-10-06  3:40           ` Rob Clark
2020-10-06  8:24             ` Hillf Danton
2020-10-06  9:35             ` Daniel Vetter
2020-10-05 16:49       ` Rob Clark
2020-10-05 18:18         ` Daniel Vetter
2020-10-04 19:21 ` [PATCH 14/14] drm/msm: Don't implicit-sync if only a single ring Rob Clark
2020-10-05 16:24 ` [Freedreno] [PATCH 00/14] drm/msm: de-struct_mutex-ification Kristian Høgsberg
2020-10-05 18:20   ` Daniel Vetter
2020-10-06  3:25     ` Rob Clark
