From: Thomas Zimmermann <tzimmermann@suse.de>
To: Dmitry Osipenko <dmitry.osipenko@collabora.com>,
	David Airlie <airlied@linux.ie>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Gurchetan Singh <gurchetansingh@chromium.org>,
	Chia-I Wu <olvaffe@gmail.com>, Daniel Vetter <daniel@ffwll.ch>,
	Daniel Almeida <daniel.almeida@collabora.com>,
	Gert Wollny <gert.wollny@collabora.com>,
	Gustavo Padovan <gustavo.padovan@collabora.com>,
	Daniel Stone <daniel@fooishbar.org>,
	Tomeu Vizoso <tomeu.vizoso@collabora.com>,
	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
	Maxime Ripard <mripard@kernel.org>, Rob Herring <robh@kernel.org>,
	Steven Price <steven.price@arm.com>,
	Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>,
	Rob Clark <robdclark@gmail.com>,
	Emil Velikov <emil.l.velikov@gmail.com>,
	Robin Murphy <robin.murphy@arm.com>
Cc: Dmitry Osipenko <digetx@gmail.com>,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	virtualization@lists.linux-foundation.org
Subject: Re: [PATCH v4 10/15] drm/shmem-helper: Take reservation lock instead of drm_gem_shmem locks
Date: Mon, 18 Apr 2022 20:38:45 +0200	[thread overview]
Message-ID: <248083d2-b8f2-a4d7-099d-70a7e7859c11@suse.de> (raw)
In-Reply-To: <20220417223707.157113-11-dmitry.osipenko@collabora.com>



Hi

On 18.04.22 00:37, Dmitry Osipenko wrote:
> Replace the drm_gem_shmem locks with the object's reservation lock to
> make GEM locking more consistent.
> 
> Previously, drm_gem_shmem_vmap() and drm_gem_shmem_get_pages() were
> protected by separate locks; now they take the same lock, which makes
> no practical difference for the current GEM SHMEM users. Only the
> Panfrost and Lima drivers use vmap(), and they do so in slow code
> paths, so there was no practical justification for a separate lock in
> vmap().
> 
> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
>   drivers/gpu/drm/drm_gem_shmem_helper.c  | 38 ++++++++++++-------------
>   drivers/gpu/drm/lima/lima_gem.c         |  8 +++---
>   drivers/gpu/drm/panfrost/panfrost_mmu.c | 15 ++++++----
>   include/drm/drm_gem_shmem_helper.h      | 10 -------
>   4 files changed, 31 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 30ee46348a99..3ecef571eff3 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -86,8 +86,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
>   	if (ret)
>   		goto err_release;
>   
> -	mutex_init(&shmem->pages_lock);
> -	mutex_init(&shmem->vmap_lock);
>   	INIT_LIST_HEAD(&shmem->madv_list);
>   
>   	if (!private) {
> @@ -157,8 +155,6 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>   	WARN_ON(shmem->pages_use_count);
>   
>   	drm_gem_object_release(obj);
> -	mutex_destroy(&shmem->pages_lock);
> -	mutex_destroy(&shmem->vmap_lock);
>   	kfree(shmem);
>   }
>   EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
> @@ -209,11 +205,11 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
>   
>   	WARN_ON(shmem->base.import_attach);
>   
> -	ret = mutex_lock_interruptible(&shmem->pages_lock);
> +	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
>   	if (ret)
>   		return ret;
>   	ret = drm_gem_shmem_get_pages_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> @@ -248,9 +244,9 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
>    */
>   void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
>   {
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   	drm_gem_shmem_put_pages_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_put_pages);
>   
> @@ -310,7 +306,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>   	} else {
>   		pgprot_t prot = PAGE_KERNEL;
>   
> -		ret = drm_gem_shmem_get_pages(shmem);
> +		ret = drm_gem_shmem_get_pages_locked(shmem);
>   		if (ret)
>   			goto err_zero_use;
>   
> @@ -360,11 +356,11 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
>   {
>   	int ret;
>   
> -	ret = mutex_lock_interruptible(&shmem->vmap_lock);
> +	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
>   	if (ret)
>   		return ret;
>   	ret = drm_gem_shmem_vmap_locked(shmem, map);

Within drm_gem_shmem_vmap_locked(), there's a call to dma_buf_vmap() for 
imported pages. If the exporter side also acquires the same reservation 
lock as our object, the whole thing can deadlock. We cannot move 
dma_buf_vmap() out of the critical section, because we still need to 
increment the reference counter. I honestly don't know how to fix this 
problem easily. There's a TODO item about replacing these locks at [1]. 
Since Daniel suggested this patch, we should talk to him about the issue.
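
A rough sketch of the hazard, to make it concrete (hand-wavy;
importer_vmap() is made up, the real path is drm_gem_shmem_vmap() ->
drm_gem_shmem_vmap_locked()):

static int importer_vmap(struct drm_gem_shmem_object *shmem,
			 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	/*
	 * With this patch, the importer takes the object's reservation
	 * lock around the vmap. For imported objects, obj->resv usually
	 * points at the dma-buf's reservation object, so importer and
	 * exporter share one lock.
	 */
	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;

	/*
	 * drm_gem_shmem_vmap_locked() ends up here for imported pages.
	 * If the exporter's vmap implementation takes dma_resv_lock()
	 * on the dma-buf's reservation object, that is the lock we
	 * already hold: deadlock.
	 */
	ret = dma_buf_vmap(obj->import_attach->dmabuf, map);

	dma_resv_unlock(obj->resv);
	return ret;
}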

Best regards
Thomas

[1] https://www.kernel.org/doc/html/latest/gpu/todo.html#move-buffer-object-locking-to-dma-resv-lock



> -	mutex_unlock(&shmem->vmap_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> @@ -385,7 +381,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   		dma_buf_vunmap(obj->import_attach->dmabuf, map);
>   	} else {
>   		vunmap(shmem->vaddr);
> -		drm_gem_shmem_put_pages(shmem);
> +		drm_gem_shmem_put_pages_locked(shmem);
>   	}
>   
>   	shmem->vaddr = NULL;
> @@ -406,9 +402,11 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
>   			  struct iosys_map *map)
>   {
> -	mutex_lock(&shmem->vmap_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   	drm_gem_shmem_vunmap_locked(shmem, map);
> -	mutex_unlock(&shmem->vmap_lock);
> +	dma_resv_unlock(shmem->base.resv);
> +
> +	drm_gem_shmem_update_purgeable_status(shmem);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_vunmap);
>   
> @@ -442,14 +440,14 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>    */
>   int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
>   {
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   
>   	if (shmem->madv >= 0)
>   		shmem->madv = madv;
>   
>   	madv = shmem->madv;
>   
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return (madv >= 0);
>   }
> @@ -487,10 +485,10 @@ EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
>   
>   bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
>   {
> -	if (!mutex_trylock(&shmem->pages_lock))
> +	if (!dma_resv_trylock(shmem->base.resv))
>   		return false;
>   	drm_gem_shmem_purge_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return true;
>   }
> @@ -549,7 +547,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>   	/* We don't use vmf->pgoff since that has the fake offset */
>   	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
>   
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   
>   	if (page_offset >= num_pages ||
>   	    WARN_ON_ONCE(!shmem->pages) ||
> @@ -561,7 +559,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>   		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
>   	}
>   
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> index 0f1ca0b0db49..5008f0c2428f 100644
> --- a/drivers/gpu/drm/lima/lima_gem.c
> +++ b/drivers/gpu/drm/lima/lima_gem.c
> @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   
>   	new_size = min(new_size, bo->base.base.size);
>   
> -	mutex_lock(&bo->base.pages_lock);
> +	dma_resv_lock(bo->base.base.resv, NULL);
>   
>   	if (bo->base.pages) {
>   		pages = bo->base.pages;
> @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
>   				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
>   		if (!pages) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(bo->base.base.resv);
>   			return -ENOMEM;
>   		}
>   
> @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   		struct page *page = shmem_read_mapping_page(mapping, i);
>   
>   		if (IS_ERR(page)) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(bo->base.base.resv);
>   			return PTR_ERR(page);
>   		}
>   		pages[i] = page;
>   	}
>   
> -	mutex_unlock(&bo->base.pages_lock);
> +	dma_resv_unlock(bo->base.base.resv);
>   
>   	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
>   					new_size, GFP_KERNEL);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index d3f82b26a631..404b8f67e2df 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -424,6 +424,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	struct panfrost_gem_mapping *bomapping;
>   	struct panfrost_gem_object *bo;
>   	struct address_space *mapping;
> +	struct drm_gem_object *obj;
>   	pgoff_t page_offset;
>   	struct sg_table *sgt;
>   	struct page **pages;
> @@ -446,13 +447,15 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	page_offset = addr >> PAGE_SHIFT;
>   	page_offset -= bomapping->mmnode.start;
>   
> -	mutex_lock(&bo->base.pages_lock);
> +	obj = &bo->base.base;
> +
> +	dma_resv_lock(obj->resv, NULL);
>   
>   	if (!bo->base.pages) {
>   		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
>   				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
>   		if (!bo->sgts) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = -ENOMEM;
>   			goto err_bo;
>   		}
> @@ -462,7 +465,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   		if (!pages) {
>   			kvfree(bo->sgts);
>   			bo->sgts = NULL;
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = -ENOMEM;
>   			goto err_bo;
>   		}
> @@ -472,7 +475,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   		pages = bo->base.pages;
>   		if (pages[page_offset]) {
>   			/* Pages are already mapped, bail out. */
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			goto out;
>   		}
>   	}
> @@ -483,13 +486,13 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
>   		pages[i] = shmem_read_mapping_page(mapping, i);
>   		if (IS_ERR(pages[i])) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = PTR_ERR(pages[i]);
>   			goto err_pages;
>   		}
>   	}
>   
> -	mutex_unlock(&bo->base.pages_lock);
> +	dma_resv_unlock(obj->resv);
>   
>   	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
>   	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index d0a57853c188..70889533962a 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
>   	 */
>   	struct drm_gem_object base;
>   
> -	/**
> -	 * @pages_lock: Protects the page table and use count
> -	 */
> -	struct mutex pages_lock;
> -
>   	/**
>   	 * @pages: Page table
>   	 */
> @@ -79,11 +74,6 @@ struct drm_gem_shmem_object {
>   	 */
>   	struct sg_table *sgt;
>   
> -	/**
> -	 * @vmap_lock: Protects the vmap address and use count
> -	 */
> -	struct mutex vmap_lock;
> -
>   	/**
>   	 * @vaddr: Kernel virtual address of the backing memory
>   	 */

-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Ivo Totev



Thread overview: 123+ messages

2022-04-17 22:36 [PATCH v4 00/15] Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 01/15] drm/virtio: Correct drm_gem_shmem_get_sg_table() error handling Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 02/15] drm/virtio: Check whether transferred 2D BO is shmem Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 03/15] drm/virtio: Unlock GEM reservations on virtio_gpu_object_shmem_init() error Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 04/15] drm/virtio: Unlock reservations on dma_resv_reserve_fences() error Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 05/15] drm/virtio: Use appropriate atomic state in virtio_gpu_plane_cleanup_fb() Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 06/15] drm/virtio: Simplify error handling of virtio_gpu_object_create() Dmitry Osipenko
2022-04-17 22:36 ` [PATCH v4 07/15] drm/virtio: Improve DMA API usage for shmem BOs Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 08/15] drm/virtio: Use dev_is_pci() Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 09/15] drm/shmem-helper: Correct doc-comment of drm_gem_shmem_get_sg_table() Dmitry Osipenko
2022-04-18 18:25   ` Thomas Zimmermann
2022-04-18 19:43     ` Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 10/15] drm/shmem-helper: Take reservation lock instead of drm_gem_shmem locks Dmitry Osipenko
2022-04-18 18:38   ` Thomas Zimmermann [this message]
2022-04-18 19:18     ` Dmitry Osipenko
2022-04-27 14:50       ` Daniel Vetter
2022-04-28 18:31         ` Dmitry Osipenko
2022-05-04  8:21           ` Daniel Vetter
2022-05-04 15:56             ` Dmitry Osipenko
2022-05-05  8:12               ` Daniel Vetter
2022-05-05 22:49                 ` Dmitry Osipenko
2022-05-09 13:42                   ` Daniel Vetter
2022-05-10 13:39                     ` Dmitry Osipenko
2022-05-11 13:00                       ` Daniel Vetter
2022-05-11 14:24                         ` Christian König
2022-05-11 15:07                           ` Daniel Vetter
2022-05-11 15:14                           ` Dmitry Osipenko
2022-05-11 15:29                             ` Daniel Vetter
2022-05-11 15:40                               ` Dmitry Osipenko
2022-05-11 19:05                                 ` Daniel Vetter
2022-05-12  7:29                                   ` Christian König
2022-05-12 14:15                                     ` Daniel Vetter
2022-04-17 22:37 ` [PATCH v4 11/15] drm/shmem-helper: Add generic memory shrinker Dmitry Osipenko
2022-04-19  7:22   ` Thomas Zimmermann
2022-04-19 20:40     ` Dmitry Osipenko
2022-04-27 15:03       ` Daniel Vetter
2022-04-28 18:20         ` Dmitry Osipenko
2022-05-04  8:24           ` Daniel Vetter
2022-06-19 16:54           ` Rob Clark
2022-05-05  8:34   ` Thomas Zimmermann
2022-05-05 11:59     ` Daniel Vetter
2022-05-06  0:10     ` Dmitry Osipenko
2022-05-09 13:49       ` Daniel Vetter
2022-05-10 13:47         ` Dmitry Osipenko
2022-05-11 13:09           ` Daniel Vetter
2022-05-11 16:06             ` Dmitry Osipenko
2022-05-11 19:09               ` Daniel Vetter
2022-05-12 11:36                 ` Dmitry Osipenko
2022-05-12 17:04                   ` Daniel Vetter
2022-05-12 19:04                     ` Dmitry Osipenko
2022-05-19 14:13                       ` Daniel Vetter
2022-05-19 14:29                         ` Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 12/15] drm/virtio: Support memory shrinking Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 13/15] drm/panfrost: Switch to generic memory shrinker Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 14/15] drm/shmem-helper: Make drm_gem_shmem_get_pages() private Dmitry Osipenko
2022-04-17 22:37 ` [PATCH v4 15/15] drm/shmem-helper: Remove drm_gem_shmem_purge() Dmitry Osipenko
