* [PATCH] ARM: *: mm: Implement get_user_pages_fast()
@ 2016-09-21  2:00 Yuriy Romanenko
  2016-09-29  8:59 ` Rabin Vincent
From: Yuriy Romanenko @ 2016-09-21  2:00 UTC
  To: linux-arm-kernel

From 6be781314e78ad43d797915189145a0aae41f639 Mon Sep 17 00:00:00 2001
From: Yuriy Romanenko <yromanenko@carrietech.com>
Date: Tue, 20 Sep 2016 18:50:16 -0700
Subject: [PATCH] ARM: *: mm: Implement get_user_pages_fast()

Do an unlocked walk of the page tables. If that yields all of the
requested pages, succeed and return immediately; otherwise fall back
to the old slow path (get_user_pages) for the remainder.

Signed-off-by: Yuriy Romanenko <yromanenko@carrietech.com>
---
 arch/arm/mm/Makefile |  2 +-
 arch/arm/mm/gup.c    | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 91 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm/mm/gup.c

diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 7f76d96..096cfcb 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
    iomap.o

 obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
-   mmap.o pgd.o mmu.o pageattr.o
+   mmap.o pgd.o mmu.o pageattr.o gup.o

 ifneq ($(CONFIG_MMU),y)
 obj-y += nommu.o
diff --git a/arch/arm/mm/gup.c b/arch/arm/mm/gup.c
new file mode 100644
index 0000000..6e57bc9
--- /dev/null
+++ b/arch/arm/mm/gup.c
@@ -0,0 +1,90 @@
+/*
+ * Lockless get_user_pages_fast for ARM
+ *
+ * Copyright (C) 2014 Lytro, Inc.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+
+struct gup_private_data {
+        int nr;
+        struct page **pages;
+        int write;
+};
+
+static int gup_pte_entry(pte_t *ptep, unsigned long start,
+                         unsigned long end, struct mm_walk *walk)
+{
+        struct gup_private_data *private_data =
+                (struct gup_private_data *)walk->private;
+        struct page *page;
+        pte_t pte = *ptep;
+        if (!pte_present(pte) ||
+            pte_special(pte) ||
+            (private_data->write && !pte_write(pte)))
+        {
+                return private_data->nr;
+        }
+        page = pte_page(pte);
+        get_page(page);
+        private_data->pages[private_data->nr++] = page;
+        return 0;
+}
+
+static int gup_pte_hole_entry(unsigned long start, unsigned long end,
+                              struct mm_walk *walk)
+{
+        struct gup_private_data *private_data =
+                (struct gup_private_data *)walk->private;
+        return private_data->nr;
+}
+
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                        struct page **pages)
+{
+        struct mm_struct *mm = current->mm;
+        int ret;
+        unsigned long page_addr = (start & PAGE_MASK);
+        int nr = 0;
+
+        struct gup_private_data private_data = {
+                .nr = 0,
+                .pages = pages,
+                .write = write
+        };
+
+        struct mm_walk gup_walk = {
+                .pte_entry = gup_pte_entry,
+                .pte_hole = gup_pte_hole_entry,
+                .mm = mm,
+                .private = (void *)&private_data
+        };
+
+        ret = walk_page_range(page_addr,
+                              page_addr + nr_pages * PAGE_SIZE,
+                              &gup_walk);
+        nr = ret ? ret : nr_pages;
+
+        if (nr == nr_pages)
+        {
+                return nr;
+        }
+        else
+        {
+                page_addr += (nr << PAGE_SHIFT);
+        }
+
+        down_read(&mm->mmap_sem);
+        ret = get_user_pages(current, mm, page_addr,
+                             nr_pages - nr, write, 0, pages + nr, NULL);
+        up_read(&mm->mmap_sem);
+
+        return (ret < 0) ? nr : (ret + nr);
+}
\ No newline at end of file
-- 
2.7.4
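
For context, get_user_pages_fast() pins user pages for short-term kernel
access, and the caller drops each pinned page with put_page() when done.
Below is a minimal caller-side sketch against the signature used in this
patch; example_pin_user_buffer() and its arguments are hypothetical, and
uaddr is assumed page-aligned for simplicity:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: pin a user buffer, use it, then release it.
 * Assumes uaddr is page-aligned. */
static int example_pin_user_buffer(unsigned long uaddr, size_t len)
{
        int nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct page **pages;
        int i, got;

        pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* write=1 because the kernel intends to write to the pages */
        got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (got < 0) {
                kfree(pages);
                return got;
        }

        /* ... map or DMA into the pinned pages here ... */

        for (i = 0; i < got; i++)
                put_page(pages[i]);
        kfree(pages);
        return (got == nr_pages) ? 0 : -EFAULT;
}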


* [PATCH] ARM: *: mm: Implement get_user_pages_fast()
  2016-09-21  2:00 [PATCH] ARM: *: mm: Implement get_user_pages_fast() Yuriy Romanenko
@ 2016-09-29  8:59 ` Rabin Vincent
From: Rabin Vincent @ 2016-09-29  8:59 UTC
  To: linux-arm-kernel

On Tue, Sep 20, 2016 at 07:00:28PM -0700, Yuriy Romanenko wrote:
> +int get_user_pages_fast(unsigned long start, int nr_pages, int write,
> +                        struct page **pages)
> +{
> +        struct mm_struct *mm = current->mm;
> +        int ret;
> +        unsigned long page_addr = (start & PAGE_MASK);
> +        int nr = 0;
> +
> +        struct gup_private_data private_data = {
> +                .nr = 0,
> +                .pages = pages,
> +                .write = write
> +        };
> +
> +        struct mm_walk gup_walk = {
> +                .pte_entry = gup_pte_entry,
> +                .pte_hole = gup_pte_hole_entry,
> +                .mm = mm,
> +                .private = (void *)&private_data
> +        };
> +
> +        ret = walk_page_range(page_addr,
> +                              page_addr + nr_pages * PAGE_SIZE,
> +                              &gup_walk);
> +        nr = ret ? ret : nr_pages;

walk_page_range() can't be called without the mmap_sem.

  * Locking:
  *   Callers of walk_page_range() and walk_page_vma() should hold
  *   @walk->mm->mmap_sem, because these function traverse vma list and/or
  *   access to vma's data.
  */
 int walk_page_range(unsigned long start, unsigned long end,
 		    struct mm_walk *walk)
 {
 	int err = 0;
 	unsigned long next;
 	struct vm_area_struct *vma;
 
 	if (start >= end)
 		return -EINVAL;
 
 	if (!walk->mm)
 		return -EINVAL;
 
 	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
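
For reference, the minimal way for the posted code to satisfy that
requirement would be to take mmap_sem around the walk itself, as in
the sketch below (though holding the semaphore here gives up most of
the point of a fast path; the generic lockless gup in mm/gup.c instead
walks the page tables with interrupts disabled and no mmap_sem):

        /* Sketch only: same variables as the posted function. */
        down_read(&mm->mmap_sem);
        ret = walk_page_range(page_addr,
                              page_addr + nr_pages * PAGE_SIZE,
                              &gup_walk);
        up_read(&mm->mmap_sem);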
