Intel-GFX Archive on lore.kernel.org
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Christoph Hellwig <hch@lst.de>
Cc: Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Minchan Kim <minchan@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	intel-gfx@lists.freedesktop.org, x86@kernel.org,
	linux-kernel@vger.kernel.org,
	Matthew Wilcox <willy@infradead.org>,
	Chris Wilson <chris@chris-wilson.co.uk>,
	linux-mm@kvack.org, dri-devel@lists.freedesktop.org,
	xen-devel@lists.xenproject.org,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Nitin Gupta <ngupta@vflare.org>,
	Matthew Auld <matthew.auld@intel.com>
Subject: Re: [Intel-gfx] [PATCH 3/6] drm/i915: use vmap in shmem_pin_map
Date: Tue, 22 Sep 2020 17:13:45 +0100
Message-ID: <e429c3e6-2143-f51a-4c1c-c1470076ad3e@linux.intel.com>
In-Reply-To: <20200922143141.GA26637@lst.de>


On 22/09/2020 15:31, Christoph Hellwig wrote:
> On Tue, Sep 22, 2020 at 09:23:59AM +0100, Tvrtko Ursulin wrote:
>> If I understood this sub-thread correctly, iterating over and freeing
>> the pages via the vmapped PTEs, so that no shmem_read_mapping_page_gfp
>> loop is needed in shmem_unpin_map, sounds plausible to me.
>>
>> I did not get the reference to kernel/dma/remap.c though,
> 
> What I mean is the code in dma_common_find_pages, which returns the
> page array for freeing.

Got it.
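
For reference, if I am reading it right, dma_common_find_pages() is
roughly this (quoting from memory, so the details may be off):

	struct page **dma_common_find_pages(void *cpu_addr)
	{
		struct vm_struct *area = find_vm_area(cpu_addr);

		/* only DMA-coherent remaps carry a page array here */
		if (!area || area->flags != VM_DMA_COHERENT)
			return NULL;
		return area->pages;
	}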

>> and I am also not sure
>> how to do the error unwind path in shmem_pin_map, at which point the
>> allocated vm area hasn't been fully populated yet. Hand-roll a loop
>> walking the vm area struct in there?
> 
> Yes.  What I originally did (re-created as I didn't save it) would be
> something like this:
> 
> ---
> From 5605e77cda246df6dd7ded99ec22cb3f341ef5d5 Mon Sep 17 00:00:00 2001
> From: Christoph Hellwig <hch@lst.de>
> Date: Wed, 16 Sep 2020 13:54:04 +0200
> Subject: drm/i915: use vmap in shmem_pin_map
> 
> shmem_pin_map somewhat awkwardly reimplements vmap using
> alloc_vm_area and manual pte setup.  The only practical difference
> is that alloc_vm_area prefaults the vmalloc area PTEs, which doesn't
> seem to be required here (and could be added to vmap using a flag
> if actually required).
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/gpu/drm/i915/gt/shmem_utils.c | 81 +++++++++------------------
>   1 file changed, 27 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
> index 43c7acbdc79dea..7ec6ba4c1065b2 100644
> --- a/drivers/gpu/drm/i915/gt/shmem_utils.c
> +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
> @@ -49,80 +49,53 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
>   	return file;
>   }
>   
> -static size_t shmem_npte(struct file *file)
> +static size_t shmem_npages(struct file *file)
>   {
>   	return file->f_mapping->host->i_size >> PAGE_SHIFT;
>   }
>   
> -static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
> -{
> -	unsigned long pfn;
> -
> -	vunmap(ptr);
> -
> -	for (pfn = 0; pfn < n_pte; pfn++) {
> -		struct page *page;
> -
> -		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
> -						   GFP_KERNEL);
> -		if (!WARN_ON(IS_ERR(page))) {
> -			put_page(page);
> -			put_page(page);
> -		}
> -	}
> -}
> -
>   void *shmem_pin_map(struct file *file)
>   {
> -	const size_t n_pte = shmem_npte(file);
> -	pte_t *stack[32], **ptes, **mem;

Chris can comment on how much he'd miss the 32-page stack shortcut.
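
If we wanted to keep it, I suppose the same trick still works with the
new pages array; something like this (untested):

	struct page *stack[32], **pages = stack;

	/* fall back to a heap allocation only for larger objects */
	if (n_pages > ARRAY_SIZE(stack)) {
		pages = kvmalloc_array(n_pages, sizeof(*pages),
				       GFP_KERNEL);
		if (!pages)
			return NULL;
	}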

> -	struct vm_struct *area;
> -	unsigned long pfn;
> -
> -	mem = stack;
> -	if (n_pte > ARRAY_SIZE(stack)) {
> -		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
> -		if (!mem)
> -			return NULL;
> -	}
> +	size_t n_pages = shmem_npages(file), i;
> +	struct page **pages;
> +	void *vaddr;
>   
> -	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
> -	if (!area) {
> -		if (mem != stack)
> -			kvfree(mem);
> +	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
> +	if (!pages)
>   		return NULL;
> -	}
> -
> -	ptes = mem;
> -	for (pfn = 0; pfn < n_pte; pfn++) {
> -		struct page *page;
>   
> -		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
> -						   GFP_KERNEL);
> -		if (IS_ERR(page))
> +	for (i = 0; i < n_pages; i++) {
> +		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
> +						       GFP_KERNEL);
> +		if (IS_ERR(pages[i]))
>   			goto err_page;
> -
> -		**ptes++ = mk_pte(page,  PAGE_KERNEL);
>   	}
>   
> -	if (mem != stack)
> -		kvfree(mem);
> -
> +	vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
> +	if (!vaddr)
> +		goto err_page;
>   	mapping_set_unevictable(file->f_mapping);
> -	return area->addr;
> -
> +	return vaddr;

Is there something in vmap() preventing us from freeing the pages array 
here? I can't spot anything that holds on to the pointer. Or was this 
just a sketch from before you realized we could walk the vm_area?

Also, I may be totally misunderstanding something, but I think you need 
to assign area->pages manually so shmem_unpin_map can access it below.
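
Something like this right after the vmap() call is what I mean
(untested sketch, assuming it is even legitimate to poke at the
vm_struct behind a vmap'ed address like this):

	struct vm_struct *area = find_vm_area(vaddr);

	/* stash the page array so shmem_unpin_map can find it */
	if (area) {
		area->pages = pages;
		area->nr_pages = n_pages;
	}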

>   err_page:
> -	if (mem != stack)
> -		kvfree(mem);
> -
> -	__shmem_unpin_map(file, area->addr, pfn);
> +	while (i--)
> +		put_page(pages[i]);
> +	kvfree(pages);
>   	return NULL;
>   }
>   
>   void shmem_unpin_map(struct file *file, void *ptr)
>   {
> +	struct vm_struct *area = find_vm_area(ptr);
> +	size_t i;
> +
> +	if (WARN_ON_ONCE(!area || !area->pages))
> +		return;
> +
>   	mapping_clear_unevictable(file->f_mapping);
> -	__shmem_unpin_map(file, ptr, shmem_npte(file));
> +	for (i = 0; i < shmem_npages(file); i++)
> +		put_page(area->pages[i]);
> +	kvfree(area->pages);
> +	vunmap(ptr);

Is the verdict from the mm experts that we can't use vfree() due to the 
__free_pages() vs put_page() difference?

Could we get from the PTEs to the pages, so that we don't have to keep 
the area->pages array allocated for the duration of the pin?
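
Something like vmalloc_to_page() in shmem_unpin_map(), perhaps
(untested sketch):

	/* recover each page from the page tables instead of area->pages */
	for (i = 0; i < shmem_npages(file); i++)
		put_page(vmalloc_to_page(ptr + i * PAGE_SIZE));
	vunmap(ptr);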

Regards,

Tvrtko

>   }
>   
>   static int __shmem_rw(struct file *file, loff_t off,
> 
