From: Logan Gunthorpe <logang@deltatee.com>
To: linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-block@vger.kernel.org, linux-pci@vger.kernel.org,
linux-mm@kvack.org, iommu@lists.linux-foundation.org
Cc: "Stephen Bates" <sbates@raithlin.com>,
"Christoph Hellwig" <hch@lst.de>,
"Dan Williams" <dan.j.williams@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Christian König" <christian.koenig@amd.com>,
"John Hubbard" <jhubbard@nvidia.com>,
"Don Dutile" <ddutile@redhat.com>,
"Matthew Wilcox" <willy@infradead.org>,
"Daniel Vetter" <daniel.vetter@ffwll.ch>,
"Jakowski Andrzej" <andrzej.jakowski@intel.com>,
"Minturn Dave B" <dave.b.minturn@intel.com>,
"Jason Ekstrand" <jason@jlekstrand.net>,
"Dave Hansen" <dave.hansen@linux.intel.com>,
"Xiong Jianxin" <jianxin.xiong@intel.com>,
"Bjorn Helgaas" <helgaas@kernel.org>,
"Ira Weiny" <ira.weiny@intel.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Martin Oliveira" <martin.oliveira@eideticom.com>,
"Chaitanya Kulkarni" <ckulkarnilinux@gmail.com>,
"Logan Gunthorpe" <logang@deltatee.com>
Subject: [PATCH v3 14/20] mm: introduce FOLL_PCI_P2PDMA to gate getting PCI P2PDMA pages
Date: Thu, 16 Sep 2021 17:40:54 -0600 [thread overview]
Message-ID: <20210916234100.122368-15-logang@deltatee.com> (raw)
In-Reply-To: <20210916234100.122368-1-logang@deltatee.com>
Callers that expect PCI P2PDMA pages can now set FOLL_PCI_P2PDMA to
allow obtaining P2PDMA pages. If a caller does not set this flag and
tries to map P2PDMA pages, the mapping will fail.
This is implemented by adding a flag and a check to get_dev_pagemap().
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
drivers/dax/super.c | 7 ++++---
include/linux/memremap.h | 4 ++--
include/linux/mm.h | 1 +
mm/gup.c | 28 +++++++++++++++++-----------
mm/huge_memory.c | 8 ++++----
mm/memory-failure.c | 4 ++--
mm/memory_hotplug.c | 2 +-
mm/memremap.c | 14 ++++++++++----
8 files changed, 41 insertions(+), 27 deletions(-)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index fc89e91beea7..ffb6e57e65bb 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -180,9 +180,10 @@ bool generic_fsdax_supported(struct dax_device *dax_dev,
} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
struct dev_pagemap *pgmap, *end_pgmap;
- pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
- if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+ pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL, false);
+ end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL, false);
+ if (!IS_ERR_OR_NULL(pgmap) && pgmap == end_pgmap
+ && pgmap->type == MEMORY_DEVICE_FS_DAX
&& pfn_t_to_page(pfn)->pgmap == pgmap
&& pfn_t_to_page(end_pfn)->pgmap == pgmap
&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index c0e9d35889e8..f10c332dac8b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -136,7 +136,7 @@ void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap);
+ struct dev_pagemap *pgmap, bool allow_pci_p2pdma);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
@@ -161,7 +161,7 @@ static inline void devm_memunmap_pages(struct device *dev,
}
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+ struct dev_pagemap *pgmap, bool allow_pci_p2pdma)
{
return NULL;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 73a52aba448f..6afdc09d0712 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2864,6 +2864,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
+#define FOLL_PCI_P2PDMA 0x100000 /* allow returning PCI P2PDMA pages */
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
diff --git a/mm/gup.c b/mm/gup.c
index 886d6148d3d0..1a03b9200cd9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -522,11 +522,16 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
* case since they are only valid while holding the pgmap
* reference.
*/
- *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
- if (*pgmap)
+ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap,
+ flags & FOLL_PCI_P2PDMA);
+ if (IS_ERR(*pgmap)) {
+ page = ERR_CAST(*pgmap);
+ goto out;
+ } else if (*pgmap) {
page = pte_page(pte);
- else
+ } else {
goto no_page;
+ }
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
@@ -846,7 +851,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
return NULL;
page = follow_page_mask(vma, address, foll_flags, &ctx);
- if (ctx.pgmap)
+ if (!IS_ERR_OR_NULL(ctx.pgmap))
put_dev_pagemap(ctx.pgmap);
return page;
}
@@ -1199,7 +1204,7 @@ static long __get_user_pages(struct mm_struct *mm,
nr_pages -= page_increm;
} while (nr_pages);
out:
- if (ctx.pgmap)
+ if (!IS_ERR_OR_NULL(ctx.pgmap))
put_dev_pagemap(ctx.pgmap);
return i ? i : ret;
}
@@ -2149,8 +2154,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
if (unlikely(flags & FOLL_LONGTERM))
goto pte_unmap;
- pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
- if (unlikely(!pgmap)) {
+ pgmap = get_dev_pagemap(pte_pfn(pte), pgmap,
+ flags & FOLL_PCI_P2PDMA);
+ if (IS_ERR_OR_NULL(pgmap)) {
undo_dev_pagemap(nr, nr_start, flags, pages);
goto pte_unmap;
}
@@ -2198,7 +2204,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
ret = 1;
pte_unmap:
- if (pgmap)
+ if (!IS_ERR_OR_NULL(pgmap))
put_dev_pagemap(pgmap);
pte_unmap(ptem);
return ret;
@@ -2233,8 +2239,8 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
do {
struct page *page = pfn_to_page(pfn);
- pgmap = get_dev_pagemap(pfn, pgmap);
- if (unlikely(!pgmap)) {
+ pgmap = get_dev_pagemap(pfn, pgmap, flags & FOLL_PCI_P2PDMA);
+ if (IS_ERR_OR_NULL(pgmap)) {
undo_dev_pagemap(nr, nr_start, flags, pages);
ret = 0;
break;
@@ -2708,7 +2714,7 @@ static int internal_get_user_pages_fast(unsigned long start,
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
FOLL_FORCE | FOLL_PIN | FOLL_GET |
- FOLL_FAST_ONLY)))
+ FOLL_FAST_ONLY | FOLL_PCI_P2PDMA)))
return -EINVAL;
if (gup_flags & FOLL_PIN)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5e9ef0fc261e..853157a84b00 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1014,8 +1014,8 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
return ERR_PTR(-EEXIST);
pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
- *pgmap = get_dev_pagemap(pfn, *pgmap);
- if (!*pgmap)
+ *pgmap = get_dev_pagemap(pfn, *pgmap, flags & FOLL_PCI_P2PDMA);
+ if (IS_ERR_OR_NULL(*pgmap))
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
if (!try_grab_page(page, flags))
@@ -1181,8 +1181,8 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
return ERR_PTR(-EEXIST);
pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
- *pgmap = get_dev_pagemap(pfn, *pgmap);
- if (!*pgmap)
+ *pgmap = get_dev_pagemap(pfn, *pgmap, flags & FOLL_PCI_P2PDMA);
+ if (IS_ERR_OR_NULL(*pgmap))
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
if (!try_grab_page(page, flags))
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 54879c339024..8f15ccce5aea 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1635,8 +1635,8 @@ int memory_failure(unsigned long pfn, int flags)
p = pfn_to_online_page(pfn);
if (!p) {
if (pfn_valid(pfn)) {
- pgmap = get_dev_pagemap(pfn, NULL);
- if (pgmap)
+ pgmap = get_dev_pagemap(pfn, NULL, false);
+ if (!IS_ERR_OR_NULL(pgmap))
return memory_failure_dev_pagemap(pfn, flags,
pgmap);
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9fd0be32a281..fa5cf8898b6b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -285,7 +285,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
* the section may be 'offline' but 'valid'. Only
* get_dev_pagemap() can determine sub-section online status.
*/
- pgmap = get_dev_pagemap(pfn, NULL);
+ pgmap = get_dev_pagemap(pfn, NULL, true);
put_dev_pagemap(pgmap);
/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
diff --git a/mm/memremap.c b/mm/memremap.c
index ed593bf87109..ceebdb8a72bb 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -206,14 +206,14 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
"altmap not supported for multiple ranges\n"))
return -EINVAL;
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL, true);
if (conflict_pgmap) {
WARN(1, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
return -ENOMEM;
}
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL, true);
if (conflict_pgmap) {
WARN(1, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
@@ -465,19 +465,20 @@ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
* get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
* @pfn: page frame number to lookup page_map
* @pgmap: optional known pgmap that already has a reference
+ * @allow_pci_p2pdma: allow getting a pgmap with the PCI P2PDMA type
*
* If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
* is non-NULL but does not cover @pfn the reference to it will be released.
*/
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+ struct dev_pagemap *pgmap, bool allow_pci_p2pdma)
{
resource_size_t phys = PFN_PHYS(pfn);
/*
* In the cached case we're already holding a live reference.
*/
- if (pgmap) {
+ if (!IS_ERR_OR_NULL(pgmap)) {
if (phys >= pgmap->range.start && phys <= pgmap->range.end)
return pgmap;
put_dev_pagemap(pgmap);
@@ -490,6 +491,11 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
pgmap = NULL;
rcu_read_unlock();
+ if (!allow_pci_p2pdma && pgmap->type == MEMORY_DEVICE_PCI_P2PDMA) {
+ put_dev_pagemap(pgmap);
+ return ERR_PTR(-EREMOTEIO);
+ }
+
return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
--
2.30.2
next prev parent reply other threads:[~2021-09-16 23:41 UTC|newest]
Thread overview: 87+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-09-16 23:40 [PATCH v3 00/20] Userspace P2PDMA with O_DIRECT NVMe devices Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 01/20] lib/scatterlist: add flag for indicating P2PDMA segments in an SGL Logan Gunthorpe
2021-09-28 18:32 ` Jason Gunthorpe
2021-09-29 21:15 ` Logan Gunthorpe
2021-09-30 4:47 ` Chaitanya Kulkarni
2021-09-30 16:49 ` Logan Gunthorpe
2021-09-30 4:57 ` Chaitanya Kulkarni
2021-09-16 23:40 ` [PATCH v3 02/20] PCI/P2PDMA: attempt to set map_type if it has not been set Logan Gunthorpe
2021-09-27 18:50 ` Bjorn Helgaas
2021-09-16 23:40 ` [PATCH v3 03/20] PCI/P2PDMA: make pci_p2pdma_map_type() non-static Logan Gunthorpe
2021-09-27 18:46 ` Bjorn Helgaas
2021-09-28 18:48 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 04/20] PCI/P2PDMA: introduce helpers for dma_map_sg implementations Logan Gunthorpe
2021-09-27 18:53 ` Bjorn Helgaas
2021-09-27 19:59 ` Logan Gunthorpe
2021-09-28 18:55 ` Jason Gunthorpe
2021-09-29 21:26 ` Logan Gunthorpe
2021-09-28 22:05 ` [PATCH v3 4/20] " Jason Gunthorpe
2021-09-29 21:30 ` Logan Gunthorpe
2021-09-29 22:46 ` Jason Gunthorpe
2021-09-29 23:00 ` Logan Gunthorpe
2021-09-29 23:40 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 05/20] dma-mapping: allow EREMOTEIO return code for P2PDMA transfers Logan Gunthorpe
2021-09-28 18:57 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 06/20] dma-direct: support PCI P2PDMA pages in dma-direct map_sg Logan Gunthorpe
2021-09-28 19:08 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 07/20] dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support Logan Gunthorpe
2021-09-28 19:11 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 08/20] iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg Logan Gunthorpe
2021-09-28 19:15 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 09/20] nvme-pci: check DMA ops when indicating support for PCI P2PDMA Logan Gunthorpe
2021-09-30 5:06 ` Chaitanya Kulkarni
2021-09-30 16:51 ` Logan Gunthorpe
2021-09-30 17:19 ` Chaitanya Kulkarni
2021-09-16 23:40 ` [PATCH v3 10/20] nvme-pci: convert to using dma_map_sgtable() Logan Gunthorpe
2021-10-05 22:29 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 11/20] RDMA/core: introduce ib_dma_pci_p2p_dma_supported() Logan Gunthorpe
2021-09-28 19:17 ` Jason Gunthorpe
2021-10-05 22:31 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 12/20] RDMA/rw: use dma_map_sgtable() Logan Gunthorpe
2021-09-28 19:43 ` Jason Gunthorpe
2021-09-29 22:56 ` Logan Gunthorpe
2021-10-05 22:40 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 13/20] PCI/P2PDMA: remove pci_p2pdma_[un]map_sg() Logan Gunthorpe
2021-09-27 18:50 ` Bjorn Helgaas
2021-09-28 19:43 ` Jason Gunthorpe
2021-10-05 22:42 ` Max Gurtovoy
2021-09-16 23:40 ` Logan Gunthorpe [this message]
2021-09-28 19:47 ` [PATCH v3 14/20] mm: introduce FOLL_PCI_P2PDMA to gate getting PCI P2PDMA pages Jason Gunthorpe
2021-09-29 21:34 ` Logan Gunthorpe
2021-09-29 22:48 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 15/20] iov_iter: introduce iov_iter_get_pages_[alloc_]flags() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 16/20] block: set FOLL_PCI_P2PDMA in __bio_iov_iter_get_pages() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 17/20] block: set FOLL_PCI_P2PDMA in bio_map_user_iov() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 18/20] mm: use custom page_free for P2PDMA pages Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 19/20] PCI/P2PDMA: introduce pci_mmap_p2pmem() Logan Gunthorpe
2021-09-27 18:49 ` Bjorn Helgaas
2021-09-28 19:55 ` Jason Gunthorpe
2021-09-29 21:42 ` Logan Gunthorpe
2021-09-29 23:05 ` Jason Gunthorpe
2021-09-29 23:27 ` Logan Gunthorpe
2021-09-29 23:35 ` Jason Gunthorpe
2021-09-29 23:49 ` Logan Gunthorpe
2021-09-30 0:36 ` Jason Gunthorpe
2021-10-01 13:48 ` Jason Gunthorpe
2021-10-01 17:01 ` Logan Gunthorpe
2021-10-01 17:45 ` Jason Gunthorpe
2021-10-01 20:13 ` Logan Gunthorpe
2021-10-01 22:14 ` Jason Gunthorpe
2021-10-01 22:22 ` Logan Gunthorpe
2021-10-01 22:46 ` Jason Gunthorpe
2021-10-01 23:27 ` John Hubbard
2021-10-01 23:34 ` Logan Gunthorpe
2021-10-04 6:58 ` Christian König
2021-10-04 13:11 ` Jason Gunthorpe
2021-10-04 13:22 ` Christian König
2021-10-04 13:27 ` Jason Gunthorpe
2021-10-04 14:54 ` Christian König
2021-09-28 20:05 ` Jason Gunthorpe
2021-09-29 21:46 ` Logan Gunthorpe
2021-09-16 23:41 ` [PATCH v3 20/20] nvme-pci: allow mmaping the CMB in userspace Logan Gunthorpe
2021-09-28 20:02 ` [PATCH v3 00/20] Userspace P2PDMA with O_DIRECT NVMe devices Jason Gunthorpe
2021-09-29 21:50 ` Logan Gunthorpe
2021-09-29 23:21 ` Jason Gunthorpe
2021-09-29 23:28 ` Logan Gunthorpe
2021-09-29 23:36 ` Jason Gunthorpe
2021-09-29 23:52 ` Logan Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210916234100.122368-15-logang@deltatee.com \
--to=logang@deltatee.com \
--cc=andrzej.jakowski@intel.com \
--cc=christian.koenig@amd.com \
--cc=ckulkarnilinux@gmail.com \
--cc=dan.j.williams@intel.com \
--cc=daniel.vetter@ffwll.ch \
--cc=dave.b.minturn@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=ddutile@redhat.com \
--cc=hch@lst.de \
--cc=helgaas@kernel.org \
--cc=iommu@lists.linux-foundation.org \
--cc=ira.weiny@intel.com \
--cc=jason@jlekstrand.net \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=jianxin.xiong@intel.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-pci@vger.kernel.org \
--cc=martin.oliveira@eideticom.com \
--cc=robin.murphy@arm.com \
--cc=sbates@raithlin.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).