From: ira.weiny@intel.com
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>,
Dan Williams <dan.j.williams@intel.com>,
Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>,
"Theodore Ts'o" <tytso@mit.edu>,
John Hubbard <jhubbard@nvidia.com>,
Michal Hocko <mhocko@suse.com>,
Dave Chinner <david@fromorbit.com>,
linux-xfs@vger.kernel.org, linux-rdma@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-nvdimm@lists.01.org, linux-ext4@vger.kernel.org,
linux-mm@kvack.org, Ira Weiny <ira.weiny@intel.com>
Subject: [RFC PATCH v2 10/19] mm/gup: Pass a NULL vaddr_pin through GUP fast
Date: Fri, 9 Aug 2019 15:58:24 -0700 [thread overview]
Message-ID: <20190809225833.6657-11-ira.weiny@intel.com> (raw)
In-Reply-To: <20190809225833.6657-1-ira.weiny@intel.com>
From: Ira Weiny <ira.weiny@intel.com>
Internally, GUP fast needs to know that its fast-path users will not support
file pins. Pass NULL for vaddr_pin through the fast call stack so that the
pin code can return an error if it encounters file-backed memory within
the address range.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
mm/gup.c | 65 ++++++++++++++++++++++++++++++++++----------------------
1 file changed, 40 insertions(+), 25 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 7a449500f0a6..504af3e9a942 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1813,7 +1813,8 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
@@ -1894,7 +1895,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
return 0;
}
@@ -1903,7 +1905,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, struct page **pages, int *nr,
- unsigned int flags)
+ unsigned int flags, struct vaddr_pin *vaddr_pin)
{
int nr_start = *nr;
struct dev_pagemap *pgmap = NULL;
@@ -1938,13 +1940,14 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr,
- unsigned int flags)
+ unsigned int flags, struct vaddr_pin *vaddr_pin)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- if (!__gup_device_huge(fault_pfn, addr, end, pages, nr, flags))
+ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr, flags,
+ vaddr_pin))
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
@@ -1957,13 +1960,14 @@ static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr,
- unsigned int flags)
+ unsigned int flags, struct vaddr_pin *vaddr_pin)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- if (!__gup_device_huge(fault_pfn, addr, end, pages, nr, flags))
+ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr, flags,
+ vaddr_pin))
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
@@ -1975,7 +1979,7 @@ static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, struct page **pages, int *nr,
- unsigned int flags)
+ unsigned int flags, struct vaddr_pin *vaddr_pin)
{
BUILD_BUG();
return 0;
@@ -1983,7 +1987,7 @@ static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
unsigned long end, struct page **pages, int *nr,
- unsigned int flags)
+ unsigned int flags, struct vaddr_pin *vaddr_pin)
{
BUILD_BUG();
return 0;
@@ -2075,7 +2079,8 @@ static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
- unsigned long end, unsigned int flags, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags, struct page **pages,
+ int *nr, struct vaddr_pin *vaddr_pin)
{
struct page *head, *page;
int refs;
@@ -2087,7 +2092,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
if (unlikely(flags & FOLL_LONGTERM))
return 0;
return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr,
- flags);
+ flags, vaddr_pin);
}
refs = 0;
@@ -2117,7 +2122,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
- unsigned long end, unsigned int flags, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
struct page *head, *page;
int refs;
@@ -2129,7 +2135,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
if (unlikely(flags & FOLL_LONGTERM))
return 0;
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr,
- flags);
+ flags, vaddr_pin);
}
refs = 0;
@@ -2196,7 +2202,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
unsigned long next;
pmd_t *pmdp;
@@ -2220,7 +2227,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
- pages, nr))
+ pages, nr, vaddr_pin))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
@@ -2231,7 +2238,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, flags, pages, nr))
return 0;
- } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+ } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr,
+ vaddr_pin))
return 0;
} while (pmdp++, addr = next, addr != end);
@@ -2239,7 +2247,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
}
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
unsigned long next;
pud_t *pudp;
@@ -2253,13 +2262,14 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
return 0;
if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, flags,
- pages, nr))
+ pages, nr, vaddr_pin))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
PUD_SHIFT, next, flags, pages, nr))
return 0;
- } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
+ } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr,
+ vaddr_pin))
return 0;
} while (pudp++, addr = next, addr != end);
@@ -2267,7 +2277,8 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
}
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
unsigned long next;
p4d_t *p4dp;
@@ -2284,7 +2295,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
P4D_SHIFT, next, flags, pages, nr))
return 0;
- } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
+ } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr,
+ vaddr_pin))
return 0;
} while (p4dp++, addr = next, addr != end);
@@ -2292,7 +2304,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+ unsigned int flags, struct page **pages, int *nr,
+ struct vaddr_pin *vaddr_pin)
{
unsigned long next;
pgd_t *pgdp;
@@ -2312,7 +2325,8 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, flags, pages, nr))
return;
- } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
+ } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr,
+ vaddr_pin))
return;
} while (pgdp++, addr = next, addr != end);
}
@@ -2374,7 +2388,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
gup_fast_permitted(start, end)) {
local_irq_save(flags);
- gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
+ gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr,
+ NULL);
local_irq_restore(flags);
}
@@ -2445,7 +2460,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
gup_fast_permitted(start, end)) {
local_irq_disable();
- gup_pgd_range(addr, end, gup_flags, pages, &nr);
+ gup_pgd_range(addr, end, gup_flags, pages, &nr, NULL);
local_irq_enable();
ret = nr;
}
--
2.20.1
next prev parent reply other threads:[~2019-08-09 22:58 UTC|newest]
Thread overview: 110+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-08-09 22:58 [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 01/19] fs/locks: Export F_LAYOUT lease to user space ira.weiny
2019-08-09 23:52 ` Dave Chinner
2019-08-12 17:36 ` Ira Weiny
2019-08-14 8:05 ` Dave Chinner
2019-08-14 11:21 ` Jeff Layton
2019-08-14 11:38 ` Dave Chinner
2019-08-09 22:58 ` [RFC PATCH v2 02/19] fs/locks: Add Exclusive flag to user Layout lease ira.weiny
2019-08-14 14:15 ` Jeff Layton
2019-08-14 21:56 ` Dave Chinner
2019-08-26 10:41 ` Jeff Layton
2019-08-29 23:34 ` Ira Weiny
2019-09-04 12:52 ` Jeff Layton
2019-09-04 23:12 ` John Hubbard
2019-08-09 22:58 ` [RFC PATCH v2 03/19] mm/gup: Pass flags down to __gup_device_huge* calls ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 04/19] mm/gup: Ensure F_LAYOUT lease is held prior to GUP'ing pages ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 05/19] fs/ext4: Teach ext4 to break layout leases ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 06/19] fs/ext4: Teach dax_layout_busy_page() to operate on a sub-range ira.weiny
2019-08-23 15:18 ` Vivek Goyal
2019-08-29 18:52 ` Ira Weiny
2019-08-09 22:58 ` [RFC PATCH v2 07/19] fs/xfs: Teach xfs to use new dax_layout_busy_page() ira.weiny
2019-08-09 23:30 ` Dave Chinner
2019-08-12 18:05 ` Ira Weiny
2019-08-14 8:04 ` Dave Chinner
2019-08-09 22:58 ` [RFC PATCH v2 08/19] fs/xfs: Fail truncate if page lease can't be broken ira.weiny
2019-08-09 23:22 ` Dave Chinner
2019-08-12 18:08 ` Ira Weiny
2019-08-09 22:58 ` [RFC PATCH v2 09/19] mm/gup: Introduce vaddr_pin structure ira.weiny
2019-08-10 0:06 ` John Hubbard
2019-08-09 22:58 ` ira.weiny [this message]
2019-08-10 0:06 ` [RFC PATCH v2 10/19] mm/gup: Pass a NULL vaddr_pin through GUP fast John Hubbard
2019-08-09 22:58 ` [RFC PATCH v2 11/19] mm/gup: Pass follow_page_context further down the call stack ira.weiny
2019-08-10 0:18 ` John Hubbard
2019-08-12 19:01 ` Ira Weiny
2019-08-09 22:58 ` [RFC PATCH v2 12/19] mm/gup: Prep put_user_pages() to take an vaddr_pin struct ira.weiny
2019-08-10 0:30 ` John Hubbard
2019-08-12 20:46 ` Ira Weiny
2019-08-09 22:58 ` [RFC PATCH v2 13/19] {mm,file}: Add file_pins objects ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 14/19] fs/locks: Associate file pins while performing GUP ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 15/19] mm/gup: Introduce vaddr_pin_pages() ira.weiny
2019-08-10 0:09 ` John Hubbard
2019-08-12 21:00 ` Ira Weiny
2019-08-12 21:20 ` John Hubbard
2019-08-11 23:07 ` John Hubbard
2019-08-12 21:01 ` Ira Weiny
2019-08-12 12:28 ` Jason Gunthorpe
2019-08-12 21:48 ` Ira Weiny
2019-08-13 11:47 ` Jason Gunthorpe
2019-08-13 17:46 ` Ira Weiny
2019-08-13 17:56 ` John Hubbard
2019-08-09 22:58 ` [RFC PATCH v2 16/19] RDMA/uverbs: Add back pointer to system file object ira.weiny
2019-08-12 13:00 ` Jason Gunthorpe
2019-08-12 17:28 ` Ira Weiny
2019-08-12 17:56 ` Jason Gunthorpe
2019-08-12 21:15 ` Ira Weiny
2019-08-13 11:48 ` Jason Gunthorpe
2019-08-13 17:41 ` Ira Weiny
2019-08-13 18:00 ` Jason Gunthorpe
2019-08-13 20:38 ` Ira Weiny
2019-08-14 12:23 ` Jason Gunthorpe
2019-08-14 17:50 ` Ira Weiny
2019-08-14 18:15 ` Jason Gunthorpe
2019-09-04 22:25 ` Ira Weiny
2019-09-11 8:19 ` Jason Gunthorpe
2019-08-09 22:58 ` [RFC PATCH v2 17/19] RDMA/umem: Convert to vaddr_[pin|unpin]* operations ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 18/19] {mm,procfs}: Add display file_pins proc ira.weiny
2019-08-09 22:58 ` [RFC PATCH v2 19/19] mm/gup: Remove FOLL_LONGTERM DAX exclusion ira.weiny
2019-08-14 10:17 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) Jan Kara
2019-08-14 18:08 ` Ira Weiny
2019-08-15 13:05 ` Jan Kara
2019-08-16 19:05 ` Ira Weiny
2019-08-16 23:20 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ; -) Ira Weiny
2019-08-19 6:36 ` Jan Kara
2019-08-17 2:26 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) Dave Chinner
2019-08-19 6:34 ` Jan Kara
2019-08-19 9:24 ` Dave Chinner
2019-08-19 12:38 ` Jason Gunthorpe
2019-08-19 21:53 ` Ira Weiny
2019-08-20 1:12 ` Dave Chinner
2019-08-20 11:55 ` Jason Gunthorpe
2019-08-21 18:02 ` Ira Weiny
2019-08-21 18:13 ` Jason Gunthorpe
2019-08-21 18:22 ` John Hubbard
2019-08-21 18:57 ` Ira Weiny
2019-08-21 19:06 ` Ira Weiny
2019-08-21 19:48 ` Jason Gunthorpe
2019-08-21 20:44 ` Ira Weiny
2019-08-21 23:49 ` Jason Gunthorpe
2019-08-23 3:23 ` Dave Chinner
2019-08-23 12:04 ` Jason Gunthorpe
2019-08-24 0:11 ` Dave Chinner
2019-08-24 5:08 ` Ira Weiny
2019-08-26 5:55 ` Dave Chinner
2019-08-29 2:02 ` Ira Weiny
2019-08-29 3:27 ` John Hubbard
2019-08-29 16:16 ` Ira Weiny
2019-09-02 22:26 ` Dave Chinner
2019-09-04 16:54 ` Ira Weiny
2019-08-25 19:39 ` Jason Gunthorpe
2019-08-24 4:49 ` Ira Weiny
2019-08-25 19:40 ` Jason Gunthorpe
2019-08-23 0:59 ` Dave Chinner
2019-08-23 17:15 ` Ira Weiny
2019-08-24 0:18 ` Dave Chinner
2019-08-20 0:05 ` John Hubbard
2019-08-20 1:20 ` Dave Chinner
2019-08-20 3:09 ` John Hubbard
2019-08-20 3:36 ` Dave Chinner
2019-08-21 18:43 ` John Hubbard
2019-08-21 19:09 ` Ira Weiny
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190809225833.6657-11-ira.weiny@intel.com \
--to=ira.weiny@intel.com \
--cc=akpm@linux-foundation.org \
--cc=dan.j.williams@intel.com \
--cc=david@fromorbit.com \
--cc=jack@suse.cz \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=linux-ext4@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvdimm@lists.01.org \
--cc=linux-rdma@vger.kernel.org \
--cc=linux-xfs@vger.kernel.org \
--cc=mhocko@suse.com \
--cc=tytso@mit.edu \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).