From: Christoph Hellwig <hch@lst.de>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Minchan Kim <minchan@kernel.org>, Nitin Gupta <ngupta@vflare.org>,
	x86@kernel.org, xen-devel@lists.xenproject.org,
	linux-kernel@vger.kernel.org, intel-gfx@lists.freedesktop.org,
	dri-devel@lists.freedesktop.org, linux-mm@kvack.org
Subject: [PATCH 3/6] drm/i915: use vmap in shmem_pin_map
Date: Fri, 18 Sep 2020 18:37:21 +0200	[thread overview]
Message-ID: <20200918163724.2511-4-hch@lst.de> (raw)
In-Reply-To: <20200918163724.2511-1-hch@lst.de>

shmem_pin_map somewhat awkwardly reimplements vmap using
alloc_vm_area and manual PTE setup.  The only practical difference
is that alloc_vm_area prefaults the vmalloc area PTEs, which doesn't
seem to be required here (and could be added to vmap using a flag
if actually required).
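
For reference only (not part of this patch), a minimal sketch of the
two approaches, assuming @pages already holds @n pinned pages and
eliding error handling:

	pte_t **ptes;
	struct vm_struct *area;
	void *vaddr;
	unsigned long i;

	/* Old scheme: reserve vmalloc space, then install each PTE by hand. */
	ptes = kvmalloc_array(n, sizeof(*ptes), GFP_KERNEL);
	area = alloc_vm_area(n << PAGE_SHIFT, ptes);	/* fills @ptes, prefaults them */
	for (i = 0; i < n; i++)
		*ptes[i] = mk_pte(pages[i], PAGE_KERNEL);
	vaddr = area->addr;

	/* New scheme: vmap() reserves the range and installs the PTEs itself. */
	vaddr = vmap(pages, n, 0, PAGE_KERNEL);		/* returns NULL on failure */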

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/gpu/drm/i915/gt/shmem_utils.c | 90 +++++++++++----------------
 1 file changed, 38 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 43c7acbdc79dea..77410091597f19 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,66 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 	return file;
 }
 
-static size_t shmem_npte(struct file *file)
+static size_t shmem_npages(struct file *file)
 {
 	return file->f_mapping->host->i_size >> PAGE_SHIFT;
 }
 
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
-	unsigned long pfn;
-
-	vunmap(ptr);
-
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (!WARN_ON(IS_ERR(page))) {
-			put_page(page);
-			put_page(page);
-		}
-	}
-}
-
 void *shmem_pin_map(struct file *file)
 {
-	const size_t n_pte = shmem_npte(file);
-	pte_t *stack[32], **ptes, **mem;
-	struct vm_struct *area;
-	unsigned long pfn;
-
-	mem = stack;
-	if (n_pte > ARRAY_SIZE(stack)) {
-		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
-		if (!mem)
+	const size_t n_pages = shmem_npages(file);
+	struct page **pages, *stack[32];
+	void *vaddr;
+	long i;
+
+	pages = stack;
+	if (n_pages > ARRAY_SIZE(stack)) {
+		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+		if (!pages)
 			return NULL;
 	}
 
-	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
-	if (!area) {
-		if (mem != stack)
-			kvfree(mem);
-		return NULL;
-	}
-
-	ptes = mem;
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (IS_ERR(page))
+	for (i = 0; i < n_pages; i++) {
+		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+						       GFP_KERNEL);
+		if (IS_ERR(pages[i]))
 			goto err_page;
-
-		**ptes++ = mk_pte(page,  PAGE_KERNEL);
 	}
 
-	if (mem != stack)
-		kvfree(mem);
+	vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+	if (!vaddr)
+		goto err_page;
 
+	if (pages != stack)
+		kvfree(pages);
 	mapping_set_unevictable(file->f_mapping);
-	return area->addr;
+	return vaddr;
 
 err_page:
-	if (mem != stack)
-		kvfree(mem);
-
-	__shmem_unpin_map(file, area->addr, pfn);
+	while (--i >= 0)
+		put_page(pages[i]);
+	if (pages != stack)
+		kvfree(pages);
 	return NULL;
 }
 
 void shmem_unpin_map(struct file *file, void *ptr)
 {
+	long i = shmem_npages(file);
+
 	mapping_clear_unevictable(file->f_mapping);
-	__shmem_unpin_map(file, ptr, shmem_npte(file));
+	vunmap(ptr);
+
+	for (i = 0; i < shmem_npages(file); i++) {
+		struct page *page;
+
+		page = shmem_read_mapping_page_gfp(file->f_mapping, i,
+						   GFP_KERNEL);
+		if (!WARN_ON(IS_ERR(page))) {
+			put_page(page);
+			put_page(page);
+		}
+	}
 }
 
 static int __shmem_rw(struct file *file, loff_t off,
-- 
2.28.0


