* [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-06 17:39 ` Ira Weiny
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 02/34] net/rds: convert put_page() to put_user_page*() john.hubbard
` (32 subsequent siblings)
33 siblings, 1 reply; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, Matthew Wilcox, sparclinux, Ira Weiny, ceph-devel,
devel, rds-devel, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
Provide a more capable variation of put_user_pages_dirty_lock(),
and delete put_user_pages_dirty(). This is based on the
following:
1. Lots of call sites become simpler if a bool is passed
into put_user_page*(), instead of making the call site
choose which put_user_page*() variant to call.
2. Christoph Hellwig's observation that set_page_dirty_lock()
is usually correct, and set_page_dirty() is usually a
bug, or at least questionable, within a put_user_page*()
calling chain.
This leads to the following API choices:
* put_user_pages_dirty_lock(page, npages, make_dirty)
* There is no put_user_pages_dirty(). You have to
hand code that, in the rare case that it's
required.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/infiniband/core/umem.c | 5 +-
drivers/infiniband/hw/hfi1/user_pages.c | 5 +-
drivers/infiniband/hw/qib/qib_user_pages.c | 13 +--
drivers/infiniband/hw/usnic/usnic_uiom.c | 5 +-
drivers/infiniband/sw/siw/siw_mem.c | 19 +---
include/linux/mm.h | 5 +-
mm/gup.c | 115 +++++++++------------
7 files changed, 61 insertions(+), 106 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..965cf9dea71a 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
- if (umem->writable && dirty)
- put_user_pages_dirty_lock(&page, 1);
- else
- put_user_page(page);
+ put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
}
sg_free_table(&umem->sg_head);
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index b89a9b9aef7a..469acb961fbd 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty)
{
- if (dirty)
- put_user_pages_dirty_lock(p, npages);
- else
- put_user_pages(p, npages);
+ put_user_pages_dirty_lock(p, npages, dirty);
if (mm) { /* during close after signal, mm can be NULL */
atomic64_sub(npages, &mm->pinned_vm);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index bfbfbb7e0ff4..26c1fb8d45cc 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -37,15 +37,6 @@
#include "qib.h"
-static void __qib_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- if (dirty)
- put_user_pages_dirty_lock(p, num_pages);
- else
- put_user_pages(p, num_pages);
-}
-
/**
* qib_map_page - a safety wrapper around pci_map_page()
*
@@ -124,7 +115,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
return 0;
bail_release:
- __qib_release_user_pages(p, got, 0);
+ put_user_pages_dirty_lock(p, got, false);
bail:
atomic64_sub(num_pages, &current->mm->pinned_vm);
return ret;
@@ -132,7 +123,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
void qib_release_user_pages(struct page **p, size_t num_pages)
{
- __qib_release_user_pages(p, num_pages, 1);
+ put_user_pages_dirty_lock(p, num_pages, true);
/* during close after signal, mm can be NULL */
if (current->mm)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 0b0237d41613..62e6ffa9ad78 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- if (dirty)
- put_user_pages_dirty_lock(&page, 1);
- else
- put_user_page(page);
+ put_user_pages_dirty_lock(&page, 1, dirty);
usnic_dbg("pa: %pa\n", &pa);
}
kfree(chunk);
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..1e197753bf2f 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -60,20 +60,6 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
return NULL;
}
-static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
- bool dirty)
-{
- struct page **p = chunk->plist;
-
- while (num_pages--) {
- if (!PageDirty(*p) && dirty)
- put_user_pages_dirty_lock(p, 1);
- else
- put_user_page(*p);
- p++;
- }
-}
-
void siw_umem_release(struct siw_umem *umem, bool dirty)
{
struct mm_struct *mm_s = umem->owning_mm;
@@ -82,8 +68,9 @@ void siw_umem_release(struct siw_umem *umem, bool dirty)
for (i = 0; num_pages; i++) {
int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);
- siw_free_plist(&umem->page_chunk[i], to_free,
- umem->writable && dirty);
+ put_user_pages_dirty_lock(umem->page_chunk[i].plist,
+ to_free,
+ umem->writable && dirty);
kfree(umem->page_chunk[i].plist);
num_pages -= to_free;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0334ca97c584..9759b6a24420 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1057,8 +1057,9 @@ static inline void put_user_page(struct page *page)
put_page(page);
}
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
+
void put_user_pages(struct page **pages, unsigned long npages);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/mm/gup.c b/mm/gup.c
index 98f13ab37bac..7fefd7ab02c4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,85 +29,70 @@ struct follow_page_context {
unsigned int page_mask;
};
-typedef int (*set_dirty_func_t)(struct page *page);
-
-static void __put_user_pages_dirty(struct page **pages,
- unsigned long npages,
- set_dirty_func_t sdf)
-{
- unsigned long index;
-
- for (index = 0; index < npages; index++) {
- struct page *page = compound_head(pages[index]);
-
- /*
- * Checking PageDirty at this point may race with
- * clear_page_dirty_for_io(), but that's OK. Two key cases:
- *
- * 1) This code sees the page as already dirty, so it skips
- * the call to sdf(). That could happen because
- * clear_page_dirty_for_io() called page_mkclean(),
- * followed by set_page_dirty(). However, now the page is
- * going to get written back, which meets the original
- * intention of setting it dirty, so all is well:
- * clear_page_dirty_for_io() goes on to call
- * TestClearPageDirty(), and write the page back.
- *
- * 2) This code sees the page as clean, so it calls sdf().
- * The page stays dirty, despite being written back, so it
- * gets written back again in the next writeback cycle.
- * This is harmless.
- */
- if (!PageDirty(page))
- sdf(page);
-
- put_user_page(page);
- }
-}
-
/**
- * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
+ * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * @pages: array of pages to be maybe marked dirty, and definitely released.
* @npages: number of pages in the @pages array.
+ * @make_dirty: whether to mark the pages dirty
*
* "gup-pinned page" refers to a page that has had one of the get_user_pages()
* variants called on that page.
*
* For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
+ * compound page) dirty, if @make_dirty is true, and if the page was previously
+ * listed as clean. In any case, releases all pages using put_user_page(),
+ * possibly via put_user_pages(), for the non-dirty case.
*
* Please see the put_user_page() documentation for details.
*
- * set_page_dirty(), which does not lock the page, is used here.
- * Therefore, it is the caller's responsibility to ensure that this is
- * safe. If not, then put_user_pages_dirty_lock() should be called instead.
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty_lock(), put_user_page().
*
*/
-void put_user_pages_dirty(struct page **pages, unsigned long npages)
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty)
{
- __put_user_pages_dirty(pages, npages, set_page_dirty);
-}
-EXPORT_SYMBOL(put_user_pages_dirty);
+ unsigned long index;
-/**
- * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
- * @npages: number of pages in the @pages array.
- *
- * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
- *
- * Please see the put_user_page() documentation for details.
- *
- * This is just like put_user_pages_dirty(), except that it invokes
- * set_page_dirty_lock(), instead of set_page_dirty().
- *
- */
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
-{
- __put_user_pages_dirty(pages, npages, set_page_dirty_lock);
+ /*
+ * TODO: this can be optimized for huge pages: if a series of pages is
+ * physically contiguous and part of the same compound page, then a
+ * single operation to the head page should suffice.
+ */
+
+ if (!make_dirty) {
+ put_user_pages(pages, npages);
+ return;
+ }
+
+ for (index = 0; index < npages; index++) {
+ struct page *page = compound_head(pages[index]);
+ /*
+ * Checking PageDirty at this point may race with
+ * clear_page_dirty_for_io(), but that's OK. Two key
+ * cases:
+ *
+ * 1) This code sees the page as already dirty, so it
+ * skips the call to set_page_dirty(). That could happen
+ * because clear_page_dirty_for_io() called
+ * page_mkclean(), followed by set_page_dirty().
+ * However, now the page is going to get written back,
+ * which meets the original intention of setting it
+ * dirty, so all is well: clear_page_dirty_for_io() goes
+ * on to call TestClearPageDirty(), and write the page
+ * back.
+ *
+ * 2) This code sees the page as clean, so it calls
+ * set_page_dirty(). The page stays dirty, despite being
+ * written back, so it gets written back again in the
+ * next writeback cycle. This is harmless.
+ */
+ if (!PageDirty(page))
+ set_page_dirty_lock(page);
+ put_user_page(page);
+ }
}
EXPORT_SYMBOL(put_user_pages_dirty_lock);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock()
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock() john.hubbard
@ 2019-08-06 17:39 ` Ira Weiny
2019-08-06 20:39 ` John Hubbard
0 siblings, 1 reply; 41+ messages in thread
From: Ira Weiny @ 2019-08-06 17:39 UTC (permalink / raw)
To: john.hubbard
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, Matthew Wilcox, sparclinux, Dan Williams, devel,
rds-devel, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, ceph-devel,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel, Andrew Morton
On Sun, Aug 04, 2019 at 03:48:42PM -0700, john.hubbard@gmail.com wrote:
> From: John Hubbard <jhubbard@nvidia.com>
>
> Provide a more capable variation of put_user_pages_dirty_lock(),
> and delete put_user_pages_dirty(). This is based on the
> following:
>
> 1. Lots of call sites become simpler if a bool is passed
> into put_user_page*(), instead of making the call site
> choose which put_user_page*() variant to call.
>
> 2. Christoph Hellwig's observation that set_page_dirty_lock()
> is usually correct, and set_page_dirty() is usually a
> bug, or at least questionable, within a put_user_page*()
> calling chain.
>
> This leads to the following API choices:
>
> * put_user_pages_dirty_lock(page, npages, make_dirty)
>
> * There is no put_user_pages_dirty(). You have to
> hand code that, in the rare case that it's
> required.
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Jan Kara <jack@suse.cz>
> Cc: Ira Weiny <ira.weiny@intel.com>
> Cc: Jason Gunthorpe <jgg@ziepe.ca>
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>
> ---
> drivers/infiniband/core/umem.c | 5 +-
> drivers/infiniband/hw/hfi1/user_pages.c | 5 +-
> drivers/infiniband/hw/qib/qib_user_pages.c | 13 +--
> drivers/infiniband/hw/usnic/usnic_uiom.c | 5 +-
> drivers/infiniband/sw/siw/siw_mem.c | 19 +---
> include/linux/mm.h | 5 +-
> mm/gup.c | 115 +++++++++------------
> 7 files changed, 61 insertions(+), 106 deletions(-)
>
> diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
> index 08da840ed7ee..965cf9dea71a 100644
> --- a/drivers/infiniband/core/umem.c
> +++ b/drivers/infiniband/core/umem.c
> @@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
>
> for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
> page = sg_page_iter_page(&sg_iter);
> - if (umem->writable && dirty)
> - put_user_pages_dirty_lock(&page, 1);
> - else
> - put_user_page(page);
> + put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
> }
>
> sg_free_table(&umem->sg_head);
> diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
> index b89a9b9aef7a..469acb961fbd 100644
> --- a/drivers/infiniband/hw/hfi1/user_pages.c
> +++ b/drivers/infiniband/hw/hfi1/user_pages.c
> @@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
> void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
> size_t npages, bool dirty)
> {
> - if (dirty)
> - put_user_pages_dirty_lock(p, npages);
> - else
> - put_user_pages(p, npages);
> + put_user_pages_dirty_lock(p, npages, dirty);
>
> if (mm) { /* during close after signal, mm can be NULL */
> atomic64_sub(npages, &mm->pinned_vm);
> diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
> index bfbfbb7e0ff4..26c1fb8d45cc 100644
> --- a/drivers/infiniband/hw/qib/qib_user_pages.c
> +++ b/drivers/infiniband/hw/qib/qib_user_pages.c
> @@ -37,15 +37,6 @@
>
> #include "qib.h"
>
> -static void __qib_release_user_pages(struct page **p, size_t num_pages,
> - int dirty)
> -{
> - if (dirty)
> - put_user_pages_dirty_lock(p, num_pages);
> - else
> - put_user_pages(p, num_pages);
> -}
> -
> /**
> * qib_map_page - a safety wrapper around pci_map_page()
> *
> @@ -124,7 +115,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
>
> return 0;
> bail_release:
> - __qib_release_user_pages(p, got, 0);
> + put_user_pages_dirty_lock(p, got, false);
> bail:
> atomic64_sub(num_pages, &current->mm->pinned_vm);
> return ret;
> @@ -132,7 +123,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
>
> void qib_release_user_pages(struct page **p, size_t num_pages)
> {
> - __qib_release_user_pages(p, num_pages, 1);
> + put_user_pages_dirty_lock(p, num_pages, true);
>
> /* during close after signal, mm can be NULL */
> if (current->mm)
> diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
> index 0b0237d41613..62e6ffa9ad78 100644
> --- a/drivers/infiniband/hw/usnic/usnic_uiom.c
> +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
> @@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
> for_each_sg(chunk->page_list, sg, chunk->nents, i) {
> page = sg_page(sg);
> pa = sg_phys(sg);
> - if (dirty)
> - put_user_pages_dirty_lock(&page, 1);
> - else
> - put_user_page(page);
> + put_user_pages_dirty_lock(&page, 1, dirty);
> usnic_dbg("pa: %pa\n", &pa);
> }
> kfree(chunk);
> diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
> index 67171c82b0c4..1e197753bf2f 100644
> --- a/drivers/infiniband/sw/siw/siw_mem.c
> +++ b/drivers/infiniband/sw/siw/siw_mem.c
> @@ -60,20 +60,6 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
> return NULL;
> }
>
> -static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
> - bool dirty)
> -{
> - struct page **p = chunk->plist;
> -
> - while (num_pages--) {
> - if (!PageDirty(*p) && dirty)
> - put_user_pages_dirty_lock(p, 1);
> - else
> - put_user_page(*p);
> - p++;
> - }
> -}
> -
> void siw_umem_release(struct siw_umem *umem, bool dirty)
> {
> struct mm_struct *mm_s = umem->owning_mm;
> @@ -82,8 +68,9 @@ void siw_umem_release(struct siw_umem *umem, bool dirty)
> for (i = 0; num_pages; i++) {
> int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);
>
> - siw_free_plist(&umem->page_chunk[i], to_free,
> - umem->writable && dirty);
> + put_user_pages_dirty_lock(umem->page_chunk[i].plist,
> + to_free,
> + umem->writable && dirty);
> kfree(umem->page_chunk[i].plist);
> num_pages -= to_free;
> }
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0334ca97c584..9759b6a24420 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1057,8 +1057,9 @@ static inline void put_user_page(struct page *page)
> put_page(page);
> }
>
> -void put_user_pages_dirty(struct page **pages, unsigned long npages);
> -void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
> +void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
> + bool make_dirty);
> +
> void put_user_pages(struct page **pages, unsigned long npages);
>
> #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
> diff --git a/mm/gup.c b/mm/gup.c
> index 98f13ab37bac..7fefd7ab02c4 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -29,85 +29,70 @@ struct follow_page_context {
> unsigned int page_mask;
> };
>
> -typedef int (*set_dirty_func_t)(struct page *page);
> -
> -static void __put_user_pages_dirty(struct page **pages,
> - unsigned long npages,
> - set_dirty_func_t sdf)
> -{
> - unsigned long index;
> -
> - for (index = 0; index < npages; index++) {
> - struct page *page = compound_head(pages[index]);
> -
> - /*
> - * Checking PageDirty at this point may race with
> - * clear_page_dirty_for_io(), but that's OK. Two key cases:
> - *
> - * 1) This code sees the page as already dirty, so it skips
> - * the call to sdf(). That could happen because
> - * clear_page_dirty_for_io() called page_mkclean(),
> - * followed by set_page_dirty(). However, now the page is
> - * going to get written back, which meets the original
> - * intention of setting it dirty, so all is well:
> - * clear_page_dirty_for_io() goes on to call
> - * TestClearPageDirty(), and write the page back.
> - *
> - * 2) This code sees the page as clean, so it calls sdf().
> - * The page stays dirty, despite being written back, so it
> - * gets written back again in the next writeback cycle.
> - * This is harmless.
> - */
> - if (!PageDirty(page))
> - sdf(page);
> -
> - put_user_page(page);
> - }
> -}
> -
> /**
> - * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
> - * @pages: array of pages to be marked dirty and released.
> + * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
> + * @pages: array of pages to be maybe marked dirty, and definitely released.
Better would be.
@pages: array of pages to be put
> * @npages: number of pages in the @pages array.
> + * @make_dirty: whether to mark the pages dirty
> *
> * "gup-pinned page" refers to a page that has had one of the get_user_pages()
> * variants called on that page.
> *
> * For each page in the @pages array, make that page (or its head page, if a
> - * compound page) dirty, if it was previously listed as clean. Then, release
> - * the page using put_user_page().
> + * compound page) dirty, if @make_dirty is true, and if the page was previously
> + * listed as clean. In any case, releases all pages using put_user_page(),
> + * possibly via put_user_pages(), for the non-dirty case.
I don't think users of this interface need this level of detail. I think
something like.
* For each page in the @pages array, release the page. If @make_dirty is
* true, mark the page dirty prior to release.
> *
> * Please see the put_user_page() documentation for details.
> *
> - * set_page_dirty(), which does not lock the page, is used here.
> - * Therefore, it is the caller's responsibility to ensure that this is
> - * safe. If not, then put_user_pages_dirty_lock() should be called instead.
> + * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
> + * required, then the caller should a) verify that this is really correct,
> + * because _lock() is usually required, and b) hand code it:
> + * set_page_dirty_lock(), put_user_page().
> *
> */
> -void put_user_pages_dirty(struct page **pages, unsigned long npages)
> +void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
> + bool make_dirty)
> {
> - __put_user_pages_dirty(pages, npages, set_page_dirty);
> -}
> -EXPORT_SYMBOL(put_user_pages_dirty);
> + unsigned long index;
>
> -/**
> - * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
> - * @pages: array of pages to be marked dirty and released.
> - * @npages: number of pages in the @pages array.
> - *
> - * For each page in the @pages array, make that page (or its head page, if a
> - * compound page) dirty, if it was previously listed as clean. Then, release
> - * the page using put_user_page().
> - *
> - * Please see the put_user_page() documentation for details.
> - *
> - * This is just like put_user_pages_dirty(), except that it invokes
> - * set_page_dirty_lock(), instead of set_page_dirty().
> - *
> - */
> -void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
> -{
> - __put_user_pages_dirty(pages, npages, set_page_dirty_lock);
> + /*
> + * TODO: this can be optimized for huge pages: if a series of pages is
> + * physically contiguous and part of the same compound page, then a
> + * single operation to the head page should suffice.
> + */
I think this comment belongs to the for loop below... or just something about
how to make this and put_user_pages() more efficient. It is odd, that this is
the same comment as in put_user_pages()...
The code is good. So... Other than the comments.
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Ira
> +
> + if (!make_dirty) {
> + put_user_pages(pages, npages);
> + return;
> + }
> +
> + for (index = 0; index < npages; index++) {
> + struct page *page = compound_head(pages[index]);
> + /*
> + * Checking PageDirty at this point may race with
> + * clear_page_dirty_for_io(), but that's OK. Two key
> + * cases:
> + *
> + * 1) This code sees the page as already dirty, so it
> + * skips the call to set_page_dirty(). That could happen
> + * because clear_page_dirty_for_io() called
> + * page_mkclean(), followed by set_page_dirty().
> + * However, now the page is going to get written back,
> + * which meets the original intention of setting it
> + * dirty, so all is well: clear_page_dirty_for_io() goes
> + * on to call TestClearPageDirty(), and write the page
> + * back.
> + *
> + * 2) This code sees the page as clean, so it calls
> + * set_page_dirty(). The page stays dirty, despite being
> + * written back, so it gets written back again in the
> + * next writeback cycle. This is harmless.
> + */
> + if (!PageDirty(page))
> + set_page_dirty_lock(page);
> + put_user_page(page);
> + }
> }
> EXPORT_SYMBOL(put_user_pages_dirty_lock);
>
> --
> 2.22.0
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock()
2019-08-06 17:39 ` Ira Weiny
@ 2019-08-06 20:39 ` John Hubbard
0 siblings, 0 replies; 41+ messages in thread
From: John Hubbard @ 2019-08-06 20:39 UTC (permalink / raw)
To: Ira Weiny, john.hubbard
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, Matthew Wilcox, sparclinux, Dan Williams, devel,
rds-devel, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, ceph-devel, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
Andrew Morton
On 8/6/19 10:39 AM, Ira Weiny wrote:
> On Sun, Aug 04, 2019 at 03:48:42PM -0700, john.hubbard@gmail.com wrote:
>> From: John Hubbard <jhubbard@nvidia.com>
...
>> -
>> /**
>> - * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
>> - * @pages: array of pages to be marked dirty and released.
>> + * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
>> + * @pages: array of pages to be maybe marked dirty, and definitely released.
>
> Better would be.
>
> @pages: array of pages to be put
OK, I'll change to that wording.
>
>> * @npages: number of pages in the @pages array.
>> + * @make_dirty: whether to mark the pages dirty
>> *
>> * "gup-pinned page" refers to a page that has had one of the get_user_pages()
>> * variants called on that page.
>> *
>> * For each page in the @pages array, make that page (or its head page, if a
>> - * compound page) dirty, if it was previously listed as clean. Then, release
>> - * the page using put_user_page().
>> + * compound page) dirty, if @make_dirty is true, and if the page was previously
>> + * listed as clean. In any case, releases all pages using put_user_page(),
>> + * possibly via put_user_pages(), for the non-dirty case.
>
> I don't think users of this interface need this level of detail. I think
> something like.
>
> * For each page in the @pages array, release the page. If @make_dirty is
> * true, mark the page dirty prior to release.
Yes, it is too wordy, I'll change to that.
>
...
>> -void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
>> -{
>> - __put_user_pages_dirty(pages, npages, set_page_dirty_lock);
>> + /*
>> + * TODO: this can be optimized for huge pages: if a series of pages is
>> + * physically contiguous and part of the same compound page, then a
>> + * single operation to the head page should suffice.
>> + */
>
> I think this comment belongs to the for loop below... or just something about
> how to make this and put_user_pages() more efficient. It is odd, that this is
> the same comment as in put_user_pages()...
Actually I think I'll just delete the comment entirely, it's just noise really.
>
> The code is good. So... Other than the comments.
>
> Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Thanks for the review!
thanks,
--
John Hubbard
NVIDIA
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 02/34] net/rds: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock() john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 03/34] net/ceph: " john.hubbard
` (31 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
Santosh Shilimkar, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
David S . Miller
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Cc: linux-rdma@vger.kernel.org
Cc: rds-devel@oss.oracle.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
net/rds/info.c | 5 ++---
net/rds/message.c | 2 +-
net/rds/rdma.c | 15 +++++++--------
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/net/rds/info.c b/net/rds/info.c
index 03f6fd56d237..ca6af2889adf 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -162,7 +162,6 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
struct rds_info_lengths lens;
unsigned long nr_pages = 0;
unsigned long start;
- unsigned long i;
rds_info_func func;
struct page **pages = NULL;
int ret;
@@ -235,8 +234,8 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
ret = -EFAULT;
out:
- for (i = 0; pages && i < nr_pages; i++)
- put_page(pages[i]);
+ if (pages)
+ put_user_pages(pages, nr_pages);
kfree(pages);
return ret;
diff --git a/net/rds/message.c b/net/rds/message.c
index 50f13f1d4ae0..d7b0d266c437 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -404,7 +404,7 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *
int i;
for (i = 0; i < rm->data.op_nents; i++)
- put_page(sg_page(&rm->data.op_sg[i]));
+ put_user_page(sg_page(&rm->data.op_sg[i]));
mmp = &rm->data.op_mmp_znotifier->z_mmp;
mm_unaccount_pinned_pages(mmp);
ret = -EFAULT;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 916f5ec373d8..6762e8696b99 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -162,8 +162,7 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
pages);
if (ret >= 0 && ret < nr_pages) {
- while (ret--)
- put_page(pages[ret]);
+ put_user_pages(pages, ret);
ret = -EFAULT;
}
@@ -276,7 +275,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
if (IS_ERR(trans_private)) {
for (i = 0 ; i < nents; i++)
- put_page(sg_page(&sg[i]));
+ put_user_page(sg_page(&sg[i]));
kfree(sg);
ret = PTR_ERR(trans_private);
goto out;
@@ -464,9 +463,10 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
* to local memory */
if (!ro->op_write) {
WARN_ON(!page->mapping && irqs_disabled());
- set_page_dirty(page);
+ put_user_pages_dirty_lock(&page, 1, true);
+ } else {
+ put_user_page(page);
}
- put_page(page);
}
kfree(ro->op_notifier);
@@ -481,8 +481,7 @@ void rds_atomic_free_op(struct rm_atomic_op *ao)
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- set_page_dirty(page);
- put_page(page);
+ put_user_pages_dirty_lock(&page, 1, true);
kfree(ao->op_notifier);
ao->op_notifier = NULL;
@@ -867,7 +866,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
return ret;
err:
if (page)
- put_page(page);
+ put_user_page(page);
rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 03/34] net/ceph: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 01/34] mm/gup: add make_dirty arg to put_user_pages_dirty_lock() john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 02/34] net/rds: convert put_page() to put_user_page*() john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 04/34] x86/kvm: " john.hubbard
` (30 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
Sage Weil, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, Ilya Dryomov, devel, linux-media,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, Jeff Layton, LKML, linux-xfs, linux-crypto,
linux-fsdevel, David S . Miller
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Acked-by: Jeff Layton <jlayton@kernel.org>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Sage Weil <sage@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: ceph-devel@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
net/ceph/pagevec.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 64305e7056a1..c88fff2ab9bd 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -12,13 +12,7 @@
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
- int i;
-
- for (i = 0; i < num_pages; i++) {
- if (dirty)
- set_page_dirty_lock(pages[i]);
- put_page(pages[i]);
- }
+ put_user_pages_dirty_lock(pages, num_pages, dirty);
kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 04/34] x86/kvm: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (2 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 03/34] net/ceph: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 05/34] drm/etnaviv: convert release_pages() to put_user_pages() john.hubbard
` (29 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Radim Krčmář,
Dave Hansen, Dave Chinner, dri-devel, linux-mm, H . Peter Anvin,
sparclinux, Ira Weiny, ceph-devel, devel, rds-devel, linux-rdma,
Joerg Roedel, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
Ingo Molnar, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, Thomas Gleixner, linux-crypto,
linux-fsdevel, Paolo Bonzini
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
arch/x86/kvm/svm.c | 4 ++--
virt/kvm/kvm_main.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7eafc6907861..ff93c923ed36 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1827,7 +1827,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
err:
if (npinned > 0)
- release_pages(pages, npinned);
+ put_user_pages(pages, npinned);
kvfree(pages);
return NULL;
@@ -1838,7 +1838,7 @@ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- release_pages(pages, npages);
+ put_user_pages(pages, npages);
kvfree(pages);
sev->pages_locked -= npages;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 887f3b0c2b60..4b6a596ea8e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1499,7 +1499,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page);
+ put_user_page(page);
page = wpage;
}
}
@@ -1831,7 +1831,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(kvm_pfn_t pfn)
{
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
- put_page(pfn_to_page(pfn));
+ put_user_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 05/34] drm/etnaviv: convert release_pages() to put_user_pages()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (3 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 04/34] x86/kvm: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 06/34] drm/i915: convert put_page() to put_user_page*() john.hubbard
` (28 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Radim Krčmář,
Dave Hansen, Dave Chinner, dri-devel, linux-mm, H . Peter Anvin,
sparclinux, Ira Weiny, ceph-devel, devel, rds-devel, linux-rdma,
Joerg Roedel, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
Ingo Molnar, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse, Borislav Petkov,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, Thomas Gleixner, linux-crypto,
linux-fsdevel, Paolo Bonzini
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/gpu/drm/etnaviv/etnaviv_gem.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index e8778ebb72e6..a0144a5ee325 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -686,7 +686,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
ret = get_user_pages_fast(ptr, num_pages,
!userptr->ro ? FOLL_WRITE : 0, pages);
if (ret < 0) {
- release_pages(pvec, pinned);
+ put_user_pages(pvec, pinned);
kvfree(pvec);
return ret;
}
@@ -710,7 +710,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
if (etnaviv_obj->pages) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- release_pages(etnaviv_obj->pages, npages);
+ put_user_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 06/34] drm/i915: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (4 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 05/34] drm/etnaviv: convert release_pages() to put_user_pages() john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-05 16:53 ` Rodrigo Vivi
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 07/34] drm/radeon: " john.hubbard
` (27 subsequent siblings)
33 siblings, 1 reply; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, David Airlie, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, Joonas Lahtinen, John Hubbard, intel-gfx,
Jani Nikula, linux-block, Jérôme Glisse,
linux-rpi-kernel, Rodrigo Vivi, Dan Williams, linux-arm-kernel,
linux-nfs, netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
This is a merge-able version of the fix, because it restricts
itself to put_user_page() and put_user_pages(), both of which
have not changed their APIs. Later, i915_gem_userptr_put_pages()
can be simplified to use put_user_pages_dirty_lock().
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: intel-gfx@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2caa594322bc..76dda2923cf1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -527,7 +527,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned);
+ put_user_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -640,7 +640,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned);
+ put_user_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
@@ -675,7 +675,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
set_page_dirty_lock(page);
mark_page_accessed(page);
- put_page(page);
+ put_user_page(page);
}
obj->mm.dirty = false;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 06/34] drm/i915: convert put_page() to put_user_page*()
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 06/34] drm/i915: convert put_page() to put_user_page*() john.hubbard
@ 2019-08-05 16:53 ` Rodrigo Vivi
0 siblings, 0 replies; 41+ messages in thread
From: Rodrigo Vivi @ 2019-08-05 16:53 UTC (permalink / raw)
To: john.hubbard
Cc: linux-fbdev, Jan Kara, kvm, David Airlie, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
Dan Williams, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, ceph-devel,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel, Andrew Morton
On Sun, Aug 04, 2019 at 03:48:47PM -0700, john.hubbard@gmail.com wrote:
> From: John Hubbard <jhubbard@nvidia.com>
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> This is a merge-able version of the fix, because it restricts
> itself to put_user_page() and put_user_pages(), both of which
> have not changed their APIs. Later, i915_gem_userptr_put_pages()
> can be simplified to use put_user_pages_dirty_lock().
Thanks for that.
with this version we won't have any conflict.
Ack for going through mm tree.
>
> Cc: Jani Nikula <jani.nikula@linux.intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: David Airlie <airlied@linux.ie>
> Cc: intel-gfx@lists.freedesktop.org
> Cc: dri-devel@lists.freedesktop.org
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +++---
> 1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index 2caa594322bc..76dda2923cf1 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -527,7 +527,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
> }
> mutex_unlock(&obj->mm.lock);
>
> - release_pages(pvec, pinned);
> + put_user_pages(pvec, pinned);
> kvfree(pvec);
>
> i915_gem_object_put(obj);
> @@ -640,7 +640,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
> __i915_gem_userptr_set_active(obj, true);
>
> if (IS_ERR(pages))
> - release_pages(pvec, pinned);
> + put_user_pages(pvec, pinned);
> kvfree(pvec);
>
> return PTR_ERR_OR_ZERO(pages);
> @@ -675,7 +675,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
> set_page_dirty_lock(page);
>
> mark_page_accessed(page);
> - put_page(page);
> + put_user_page(page);
> }
> obj->mm.dirty = false;
>
> --
> 2.22.0
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 07/34] drm/radeon: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (5 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 06/34] drm/i915: convert put_page() to put_user_page*() john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 08/34] media/ivtv: " john.hubbard
` (26 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, David Airlie, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, David Zhou, rds-devel, linux-rdma, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block, Alex Deucher,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel, Christian König
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: David (ChunMing) Zhou <David1.Zhou@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: amd-gfx@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/gpu/drm/radeon/radeon_ttm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fb3696bc616d..4c9943fa10df 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -540,7 +540,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
kfree(ttm->sg);
release_pages:
- release_pages(ttm->pages, pinned);
+ put_user_pages(ttm->pages, pinned);
return r;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 08/34] media/ivtv: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (6 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 07/34] drm/radeon: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 09/34] media/v4l2-core/mm: " john.hubbard
` (25 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, Mauro Carvalho Chehab, linux-arm-kernel, linux-nfs,
Andy Walls, netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Andy Walls <awalls@md.metrocast.net>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: linux-media@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/media/pci/ivtv/ivtv-udma.c | 14 ++++----------
drivers/media/pci/ivtv/ivtv-yuv.c | 11 +++--------
2 files changed, 7 insertions(+), 18 deletions(-)
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 5f8883031c9c..7c7f33c2412b 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -92,7 +92,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
{
struct ivtv_dma_page_info user_dma;
struct ivtv_user_dma *dma = &itv->udma;
- int i, err;
+ int err;
IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
@@ -119,8 +119,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
err, user_dma.page_count);
if (err >= 0) {
- for (i = 0; i < err; i++)
- put_page(dma->map[i]);
+ put_user_pages(dma->map, err);
return -EINVAL;
}
return err;
@@ -130,9 +129,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
/* Fill SG List with new values */
if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ put_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
@@ -153,7 +150,6 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
void ivtv_udma_unmap(struct ivtv *itv)
{
struct ivtv_user_dma *dma = &itv->udma;
- int i;
IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
@@ -170,9 +166,7 @@ void ivtv_udma_unmap(struct ivtv *itv)
ivtv_udma_sync_for_cpu(itv);
/* Release User Pages */
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ put_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index cd2fe2d444c0..2c61a11d391d 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -30,7 +30,6 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
struct yuv_frame_info *f = &yi->new_frame_info[frame];
- int i;
int y_pages, uv_pages;
unsigned long y_buffer_offset, uv_buffer_offset;
int y_decode_height, uv_decode_height, y_size;
@@ -81,8 +80,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
- for (i = 0; i < uv_pages; i++)
- put_page(dma->map[y_pages + i]);
+ put_user_pages(&dma->map[y_pages], uv_pages);
rc = -EFAULT;
} else {
rc = uv_pages;
@@ -93,8 +91,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
- for (i = 0; i < y_pages; i++)
- put_page(dma->map[i]);
+ put_user_pages(dma->map, y_pages);
/*
* Inherit the -EFAULT from rc's
* initialization, but allow it to be
@@ -112,9 +109,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Fill & map SG List */
if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ put_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 09/34] media/v4l2-core/mm: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (7 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 08/34] media/ivtv: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 10/34] genwqe: " john.hubbard
` (24 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
Hans Verkuil, xen-devel, devel, linux-media, Kees Cook,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, Mauro Carvalho Chehab,
linux-arm-kernel, linux-nfs, netdev, LKML, Souptick Joarder,
linux-xfs, linux-crypto, Sakari Ailus, linux-fsdevel,
Robin Murphy
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Hans Verkuil <hans.verkuil@cisco.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: linux-media@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/media/v4l2-core/videobuf-dma-sg.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 66a6c6c236a7..d6eeb437ec19 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,8 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
BUG_ON(dma->sglen);
if (dma->pages) {
- for (i = 0; i < dma->nr_pages; i++)
- put_page(dma->pages[i]);
+ put_user_pages(dma->pages, dma->nr_pages);
kfree(dma->pages);
dma->pages = NULL;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 10/34] genwqe: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (8 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 09/34] media/v4l2-core/mm: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 11/34] scif: " john.hubbard
` (23 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, Arnd Bergmann,
Guilherme G . Piccoli, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, Greg Kroah-Hartman, linux-fsdevel, Frank Haverkamp
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
This changes the release code slightly, because each page slot in the
page_list[] array is no longer checked for NULL. However, that check
was wrong anyway, because the get_user_pages() pattern of usage here
never allowed for NULL entries within a range of pinned pages.
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Frank Haverkamp <haver@linux.vnet.ibm.com>
Cc: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/misc/genwqe/card_utils.c | 17 +++--------------
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 2e1c4d2905e8..2a888f31d2c5 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -517,24 +517,13 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
/**
* genwqe_free_user_pages() - Give pinned pages back
*
- * Documentation of get_user_pages is in mm/gup.c:
- *
- * If the page is written to, set_page_dirty (or set_page_dirty_lock,
- * as appropriate) must be called after the page is finished with, and
- * before put_page is called.
+ * The pages may have been written to, so we call put_user_pages_dirty_lock(),
+ * rather than put_user_pages().
*/
static int genwqe_free_user_pages(struct page **page_list,
unsigned int nr_pages, int dirty)
{
- unsigned int i;
-
- for (i = 0; i < nr_pages; i++) {
- if (page_list[i] != NULL) {
- if (dirty)
- set_page_dirty_lock(page_list[i]);
- put_page(page_list[i]);
- }
- }
+ put_user_pages_dirty_lock(page_list, nr_pages, dirty);
return 0;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 11/34] scif: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (9 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 10/34] genwqe: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 12/34] vmci: " john.hubbard
` (22 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner,
Sudeep Dutt, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, Ashutosh Dixit, rds-devel, linux-rdma, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, Joerg Roedel, Arnd Bergmann, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Zhen Lei, Dan Williams, linux-arm-kernel,
linux-nfs, netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
Robin Murphy
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/misc/mic/scif/scif_rma.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 01e27682ea30..d84ed9466920 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -113,13 +113,14 @@ static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
int writeable = pin->prot & SCIF_PROT_WRITE;
int kernel = SCIF_MAP_KERNEL & pin->map_flags;
- for (j = 0; j < pin->nr_pages; j++) {
- if (pin->pages[j] && !kernel) {
+ if (kernel) {
+ for (j = 0; j < pin->nr_pages; j++) {
if (writeable)
- SetPageDirty(pin->pages[j]);
+ set_page_dirty_lock(pin->pages[j]);
put_page(pin->pages[j]);
}
- }
+ } else
+ put_user_pages_dirty_lock(pin->pages, pin->nr_pages, writeable);
scif_free(pin->pages,
pin->nr_pages * sizeof(*pin->pages));
@@ -1385,11 +1386,9 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
if (ulimit)
__scif_dec_pinned_vm_lock(mm, nr_pages);
/* Roll back any pinned pages */
- for (i = 0; i < pinned_pages->nr_pages; i++) {
- if (pinned_pages->pages[i])
- put_page(
- pinned_pages->pages[i]);
- }
+ put_user_pages(pinned_pages->pages,
+ pinned_pages->nr_pages);
+
prot &= ~SCIF_PROT_WRITE;
try_upgrade = false;
goto retry;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 12/34] vmci: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (10 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 11/34] scif: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 13/34] rapidio: " john.hubbard
` (21 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Gustavo A. R. Silva, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, Kees Cook, Arnd Bergmann, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel, Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Note that this effectively changes the code's behavior in
qp_release_pages(): it now ultimately calls set_page_dirty_lock(),
instead of set_page_dirty(). This is probably more accurate.
As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]
[1] https://lore.kernel.org/r/20190723153640.GB720@lst.de
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Gustavo A. R. Silva <gustavo@embeddedor.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/misc/vmw_vmci/vmci_context.c | 2 +-
drivers/misc/vmw_vmci/vmci_queue_pair.c | 11 ++---------
2 files changed, 3 insertions(+), 10 deletions(-)
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 16695366ec92..9daa52ee63b7 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -587,7 +587,7 @@ void vmci_ctx_unset_notify(struct vmci_ctx *context)
if (notify_page) {
kunmap(notify_page);
- put_page(notify_page);
+ put_user_page(notify_page);
}
}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8531ae781195..e5434551d0ef 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -626,15 +626,8 @@ static void qp_release_queue_mutex(struct vmci_queue *queue)
static void qp_release_pages(struct page **pages,
u64 num_pages, bool dirty)
{
- int i;
-
- for (i = 0; i < num_pages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- put_page(pages[i]);
- pages[i] = NULL;
- }
+ put_user_pages_dirty_lock(pages, num_pages, dirty);
+ memset(pages, 0, num_pages * sizeof(struct page *));
}
/*
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 13/34] rapidio: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (11 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 12/34] vmci: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 14/34] oradax: " john.hubbard
` (20 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, Matt Porter, devel, linux-media, Kees Cook,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
Christophe JAILLET, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, Tvrtko Ursulin, netdev, LKML,
linux-xfs, Alexandre Bounine, linux-crypto, Ioan Nicu,
linux-fsdevel, Logan Gunthorpe, Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Alexandre Bounine <alex.bou9@gmail.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Cc: Ioan Nicu <ioan.nicu.ext@nokia.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/rapidio/devices/rio_mport_cdev.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 8155f59ece38..0e8ea0e5a89e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -572,14 +572,12 @@ static void dma_req_free(struct kref *ref)
struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
refcount);
struct mport_cdev_priv *priv = req->priv;
- unsigned int i;
dma_unmap_sg(req->dmach->device->dev,
req->sgt.sgl, req->sgt.nents, req->dir);
sg_free_table(&req->sgt);
if (req->page_list) {
- for (i = 0; i < req->nr_pages; i++)
- put_page(req->page_list[i]);
+ put_user_pages(req->page_list, req->nr_pages);
kfree(req->page_list);
}
@@ -815,7 +813,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
struct mport_dma_req *req;
struct mport_dev *md = priv->md;
struct dma_chan *chan;
- int i, ret;
+ int ret;
int nents;
if (xfer->length == 0)
@@ -946,8 +944,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
err_pg:
if (!req->page_list) {
- for (i = 0; i < nr_pages; i++)
- put_page(page_list[i]);
+ put_user_pages(page_list, nr_pages);
kfree(page_list);
}
err_req:
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 14/34] oradax: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (12 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 13/34] rapidio: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 15/34] staging/vc04_services: " john.hubbard
` (19 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Rob Gardner, Jan Kara, kvm, Mauro Carvalho Chehab,
Dave Hansen, Dave Chinner, dri-devel, linux-mm, Wei Yongjun,
sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
Jonathan Corbet, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, Andy Shevchenko, xen-devel, devel, linux-media,
Jonathan Helman, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel, David S . Miller
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: David S. Miller <davem@davemloft.net>
Cc: Jonathan Helman <jonathan.helman@oracle.com>
Cc: Rob Gardner <rob.gardner@oracle.com>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: sparclinux@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/sbus/char/oradax.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 8af216287a84..029e619992fc 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -412,7 +412,7 @@ static void dax_unlock_pages(struct dax_ctx *ctx, int ccb_index, int nelem)
dax_dbg("freeing page %p", p);
if (j == OUT)
set_page_dirty(p);
- put_page(p);
+ put_user_page(p);
ctx->pages[i][j] = NULL;
}
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 15/34] staging/vc04_services: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (13 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 14/34] oradax: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-13 5:23 ` Stefan Wahren
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 16/34] drivers/tee: " john.hubbard
` (18 subsequent siblings)
33 siblings, 1 reply; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, Suniel Mahesh, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, Mihaela Muraru, xen-devel, devel, linux-media,
Stefan Wahren, John Hubbard, intel-gfx, Kishore KP, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
Sidong Yang, linux-arm-kernel, linux-nfs, Eric Anholt, netdev,
LKML, linux-xfs, linux-crypto, Greg Kroah-Hartman, linux-fsdevel,
Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Eric Anholt <eric@anholt.net>
Cc: Stefan Wahren <stefan.wahren@i2se.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mihaela Muraru <mihaela.muraru21@gmail.com>
Cc: Suniel Mahesh <sunil.m@techveda.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Sidong Yang <realwakka@gmail.com>
Cc: Kishore KP <kishore.p@techveda.org>
Cc: linux-rpi-kernel@lists.infradead.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
.../vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 61c69f353cdb..ec92b4c50e95 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -336,10 +336,7 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
}
if (pagelistinfo->pages_need_release) {
- unsigned int i;
-
- for (i = 0; i < pagelistinfo->num_pages; i++)
- put_page(pagelistinfo->pages[i]);
+ put_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
}
dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
@@ -454,10 +451,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
__func__, actual_pages, num_pages);
/* This is probably due to the process being killed */
- while (actual_pages > 0) {
- actual_pages--;
- put_page(pages[actual_pages]);
- }
+ put_user_pages(pages, actual_pages);
cleanup_pagelistinfo(pagelistinfo);
return NULL;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 15/34] staging/vc04_services: convert put_page() to put_user_page*()
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 15/34] staging/vc04_services: " john.hubbard
@ 2019-08-13 5:23 ` Stefan Wahren
0 siblings, 0 replies; 41+ messages in thread
From: Stefan Wahren @ 2019-08-13 5:23 UTC (permalink / raw)
To: john.hubbard, Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Greg Kroah-Hartman, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Suniel Mahesh,
Dan Williams, devel, rds-devel, linux-rdma, Ira Weiny, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, John Hubbard,
xen-devel, devel, linux-media, Mihaela Muraru, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
ceph-devel, Sidong Yang, linux-arm-kernel, linux-nfs,
Eric Anholt, netdev, LKML, linux-xfs, linux-crypto, Kishore KP,
linux-fsdevel, Al Viro
On 05.08.19 00:48, john.hubbard@gmail.com wrote:
> From: John Hubbard <jhubbard@nvidia.com>
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
>
> Cc: Eric Anholt <eric@anholt.net>
> Cc: Stefan Wahren <stefan.wahren@i2se.com>
> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> Cc: Mihaela Muraru <mihaela.muraru21@gmail.com>
> Cc: Suniel Mahesh <sunil.m@techveda.org>
> Cc: Al Viro <viro@zeniv.linux.org.uk>
> Cc: Sidong Yang <realwakka@gmail.com>
> Cc: Kishore KP <kishore.p@techveda.org>
> Cc: linux-rpi-kernel@lists.infradead.org
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: devel@driverdev.osuosl.org
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Acked-by: Stefan Wahren <stefan.wahren@i2se.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 16/34] drivers/tee: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (14 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 15/34] staging/vc04_services: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 17/34] vfio: " john.hubbard
` (17 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel, Jens Wiklander
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Acked-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/tee/tee_shm.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2da026fd12c9..c967d0420b67 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -31,16 +31,13 @@ static void tee_shm_release(struct tee_shm *shm)
poolm->ops->free(poolm, shm);
} else if (shm->flags & TEE_SHM_REGISTER) {
- size_t n;
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
if (rc)
dev_err(teedev->dev.parent,
"unregister shm %p failed: %d", shm, rc);
- for (n = 0; n < shm->num_pages; n++)
- put_page(shm->pages[n]);
-
+ put_user_pages(shm->pages, shm->num_pages);
kfree(shm->pages);
}
@@ -313,16 +310,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
return shm;
err:
if (shm) {
- size_t n;
-
if (shm->id >= 0) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
mutex_unlock(&teedev->mutex);
}
if (shm->pages) {
- for (n = 0; n < shm->num_pages; n++)
- put_page(shm->pages[n]);
+ put_user_pages(shm->pages, shm->num_pages);
kfree(shm->pages);
}
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 17/34] vfio: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (15 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 16/34] drivers/tee: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 18/34] fbdev/pvr2fb: " john.hubbard
` (16 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
Alex Williamson, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Note that this effectively changes the code's behavior in
put_pfn(): it now ultimately calls set_page_dirty_lock(),
instead of set_page_dirty(). This is probably more accurate.
As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]
[1] https://lore.kernel.org/r/20190723153640.GB720@lst.de
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: kvm@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/vfio/vfio_iommu_type1.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 054391f30fa8..5a5461a14299 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -320,9 +320,9 @@ static int put_pfn(unsigned long pfn, int prot)
{
if (!is_invalid_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
- if (prot & IOMMU_WRITE)
- SetPageDirty(page);
- put_page(page);
+ bool dirty = prot & IOMMU_WRITE;
+
+ put_user_pages_dirty_lock(&page, 1, dirty);
return 1;
}
return 0;
@@ -356,7 +356,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
*/
if (ret > 0 && vma_is_fsdax(vmas[0])) {
ret = -EOPNOTSUPP;
- put_page(page[0]);
+ put_user_page(page[0]);
}
}
up_read(&mm->mmap_sem);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 18/34] fbdev/pvr2fb: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (16 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 17/34] vfio: " john.hubbard
@ 2019-08-04 22:48 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 19/34] fsl_hypervisor: " john.hubbard
` (15 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:48 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, Kees Cook,
Bartlomiej Zolnierkiewicz, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Arvind Yadav,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel, Bhumika Goyal, Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Bhumika Goyal <bhumirks@gmail.com>
Cc: Arvind Yadav <arvind.yadav.cs@gmail.com>
Cc: dri-devel@lists.freedesktop.org
Cc: linux-fbdev@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/video/fbdev/pvr2fb.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 7ff4b6b84282..0e4f9aa6444d 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -700,8 +700,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
ret = count;
out_unmap:
- for (i = 0; i < nr_pages; i++)
- put_page(pages[i]);
+ put_user_pages(pages, nr_pages);
kfree(pages);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 19/34] fsl_hypervisor: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (17 preceding siblings ...)
2019-08-04 22:48 ` [Xen-devel] [PATCH v2 18/34] fbdev/pvr2fb: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 20/34] xen: " john.hubbard
` (14 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, Rob Herring,
rds-devel, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, devel, linux-media, Kees Cook,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel, Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
This changes the release code slightly, because each page slot in the
page_list[] array is no longer checked for NULL. However, that check
was wrong anyway, because the get_user_pages() pattern of usage here
never allowed for NULL entries within a range of pinned pages.
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Kees Cook <keescook@chromium.org>
Cc: Rob Herring <robh@kernel.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/virt/fsl_hypervisor.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 93d5bebf9572..a8f78d572c45 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -292,11 +292,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
virt_to_phys(sg_list), num_pages);
exit:
- if (pages) {
- for (i = 0; i < num_pages; i++)
- if (pages[i])
- put_page(pages[i]);
- }
+ if (pages)
+ put_user_pages(pages, num_pages);
kfree(sg_list_unaligned);
kfree(pages);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 20/34] xen: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (18 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 19/34] fsl_hypervisor: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-05 4:15 ` Juergen Gross
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 21/34] fs/exec.c: " john.hubbard
` (13 subsequent siblings)
33 siblings, 1 reply; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Boris Ostrovsky, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, Juergen Gross, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
This also handles pages[i] == NULL cases, thanks to an approach
that is actually written by Juergen Gross.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: xen-devel@lists.xenproject.org
---
Hi Juergen,
Say, this is *exactly* what you proposed in your gup.patch, so
I've speculatively added your Signed-off-by above, but need your
approval before that's final. Let me know please...
thanks,
John Hubbard
drivers/xen/privcmd.c | 32 +++++++++++---------------------
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index c6070e70dd73..c7d0763ca8c2 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -582,10 +582,11 @@ static long privcmd_ioctl_mmap_batch(
static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
- struct page *pages[], unsigned int nr_pages)
+ struct page *pages[], unsigned int *nr_pages)
{
- unsigned int i;
+ unsigned int i, free = *nr_pages;
+ *nr_pages = 0;
for (i = 0; i < num; i++) {
unsigned int requested;
int pinned;
@@ -593,35 +594,22 @@ static int lock_pages(
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
PAGE_SIZE);
- if (requested > nr_pages)
+ if (requested > free)
return -ENOSPC;
pinned = get_user_pages_fast(
(unsigned long) kbufs[i].uptr,
- requested, FOLL_WRITE, pages);
+ requested, FOLL_WRITE, pages + *nr_pages);
if (pinned < 0)
return pinned;
- nr_pages -= pinned;
- pages += pinned;
+ free -= pinned;
+ *nr_pages += pinned;
}
return 0;
}
-static void unlock_pages(struct page *pages[], unsigned int nr_pages)
-{
- unsigned int i;
-
- if (!pages)
- return;
-
- for (i = 0; i < nr_pages; i++) {
- if (pages[i])
- put_page(pages[i]);
- }
-}
-
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
struct privcmd_data *data = file->private_data;
@@ -681,11 +669,12 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
if (!xbufs) {
+ nr_pages = 0;
rc = -ENOMEM;
goto out;
}
- rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
+ rc = lock_pages(kbufs, kdata.num, pages, &nr_pages);
if (rc)
goto out;
@@ -699,7 +688,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ if (pages)
+ put_user_pages(pages, nr_pages);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 20/34] xen: convert put_page() to put_user_page*()
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 20/34] xen: " john.hubbard
@ 2019-08-05 4:15 ` Juergen Gross
0 siblings, 0 replies; 41+ messages in thread
From: Juergen Gross @ 2019-08-05 4:15 UTC (permalink / raw)
To: john.hubbard, Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Boris Ostrovsky, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
Dan Williams, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, ceph-devel,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel
On 05.08.19 00:49, john.hubbard@gmail.com wrote:
> From: John Hubbard <jhubbard@nvidia.com>
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> This also handles pages[i] == NULL cases, thanks to an approach
> that is actually written by Juergen Gross.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>
>
> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> Cc: xen-devel@lists.xenproject.org
> ---
>
> Hi Juergen,
>
> Say, this is *exactly* what you proposed in your gup.patch, so
> I've speculatively added your Signed-off-by above, but need your
> approval before that's final. Let me know please...
Yes, that's fine with me.
Juergen
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 21/34] fs/exec.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (19 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 20/34] xen: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 22/34] orangefs: " john.hubbard
` (12 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel, Alexander Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
fs/exec.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/exec.c b/fs/exec.c
index f7f6a140856a..ee442151582f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -227,7 +227,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
static void put_arg_page(struct page *page)
{
- put_page(page);
+ put_user_page(page);
}
static void free_arg_pages(struct linux_binprm *bprm)
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 22/34] orangefs: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (20 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 21/34] fs/exec.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 23/34] uprobes: " john.hubbard
` (11 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: Martin Brandenburg, linux-fbdev, Jan Kara, kvm, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, Mike Marshall, devel, rds-devel, linux-rdma, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
linux-arm-kernel, linux-nfs, netdev, LKML, linux-xfs,
linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Mike Marshall <hubcap@omnibond.com>
Cc: Martin Brandenburg <martin@omnibond.com>
Cc: devel@lists.orangefs.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
fs/orangefs/orangefs-bufmap.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 2bb916d68576..f2f33a16d604 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -168,10 +168,7 @@ static DEFINE_SPINLOCK(orangefs_bufmap_lock);
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
- int i;
-
- for (i = 0; i < bufmap->page_count; i++)
- put_page(bufmap->page_array[i]);
+ put_user_pages(bufmap->page_array, bufmap->page_count);
}
static void
@@ -280,7 +277,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
for (i = 0; i < ret; i++) {
SetPageError(bufmap->page_array[i]);
- put_page(bufmap->page_array[i]);
+ put_user_page(bufmap->page_array[i]);
}
return -ENOMEM;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 23/34] uprobes: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (21 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 22/34] orangefs: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 24/34] futex: " john.hubbard
` (10 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Peter Zijlstra, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, Jiri Olsa, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, Ingo Molnar,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
Arnaldo Carvalho de Melo, linux-block, Jérôme Glisse,
linux-rpi-kernel, Namhyung Kim, Dan Williams, linux-arm-kernel,
linux-nfs, netdev, LKML, Alexander Shishkin, linux-xfs,
linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
kernel/events/uprobes.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 84fa00497c49..4a575de8cec8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -397,7 +397,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
ret = 0;
out:
kunmap_atomic(kaddr);
- put_page(page);
+ put_user_page(page);
return ret;
}
@@ -504,7 +504,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
ret = __replace_page(vma, vaddr, old_page, new_page);
put_page(new_page);
put_old:
- put_page(old_page);
+ put_user_page(old_page);
if (unlikely(ret == -EAGAIN))
goto retry;
@@ -1981,7 +1981,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
return result;
copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
- put_page(page);
+ put_user_page(page);
out:
/* This needs to return true for any variant of the trap insn */
return is_trap_insn(&opcode);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 24/34] futex: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (22 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 23/34] uprobes: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 25/34] mm/frame_vector.c: " john.hubbard
` (9 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Peter Zijlstra, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, Ingo Molnar, xen-devel,
devel, linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Darren Hart,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, Thomas Gleixner, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Darren Hart <dvhart@infradead.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
kernel/futex.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 6d50728ef2e7..4b4cae58ec57 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -623,7 +623,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
lock_page(page);
shmem_swizzled = PageSwapCache(page) || page->mapping;
unlock_page(page);
- put_page(page);
+ put_user_page(page);
if (shmem_swizzled)
goto again;
@@ -675,7 +675,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
if (READ_ONCE(page->mapping) != mapping) {
rcu_read_unlock();
- put_page(page);
+ put_user_page(page);
goto again;
}
@@ -683,7 +683,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
inode = READ_ONCE(mapping->host);
if (!inode) {
rcu_read_unlock();
- put_page(page);
+ put_user_page(page);
goto again;
}
@@ -702,7 +702,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
*/
if (!atomic_inc_not_zero(&inode->i_count)) {
rcu_read_unlock();
- put_page(page);
+ put_user_page(page);
goto again;
}
@@ -723,7 +723,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
}
out:
- put_page(page);
+ put_user_page(page);
return err;
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 25/34] mm/frame_vector.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (23 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 24/34] futex: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 26/34] mm/gup_benchmark.c: " john.hubbard
` (8 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
Mel Gorman, xen-devel, devel, linux-media, Vlastimil Babka,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
mm/frame_vector.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c64dca6e27c2..f590badac776 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(get_vaddr_frames);
*/
void put_vaddr_frames(struct frame_vector *vec)
{
- int i;
struct page **pages;
if (!vec->got_ref)
@@ -133,8 +132,7 @@ void put_vaddr_frames(struct frame_vector *vec)
*/
if (WARN_ON(IS_ERR(pages)))
goto out;
- for (i = 0; i < vec->nr_frames; i++)
- put_page(pages[i]);
+ put_user_pages(pages, vec->nr_frames);
vec->got_ref = false;
out:
vec->nr_frames = 0;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 26/34] mm/gup_benchmark.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (24 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 25/34] mm/frame_vector.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 27/34] mm/memory.c: " john.hubbard
` (7 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Michael S . Tsirkin, Dave Hansen,
Dave Chinner, dri-devel, Keith Busch, linux-mm, sparclinux,
Dan Carpenter, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, YueHaibing, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, Greg Kroah-Hartman,
linux-fsdevel, Kirill A . Shutemov
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Reviewed-by: Keith Busch <keith.busch@intel.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
mm/gup_benchmark.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 7dd602d7f8db..515ac8eeb6ee 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -79,7 +79,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
for (i = 0; i < nr_pages; i++) {
if (!pages[i])
break;
- put_page(pages[i]);
+ put_user_page(pages[i]);
}
end_time = ktime_get();
gup->put_delta_usec = ktime_us_delta(end_time, start_time);
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 27/34] mm/memory.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (25 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 26/34] mm/gup_benchmark.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 28/34] mm/madvise.c: " john.hubbard
` (6 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Michal Hocko, Jan Kara, kvm, Peter Zijlstra,
Dave Hansen, Dave Chinner, dri-devel, linux-mm, Matthew Wilcox,
sparclinux, Ira Weiny, ceph-devel, devel, rds-devel, linux-rdma,
Aneesh Kumar K . V, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, Huang Ying, xen-devel, devel, linux-media,
Rik van Riel, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Will Deacon,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
Souptick Joarder, linux-xfs, linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
mm/memory.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index e2bb51b6242e..8870968496ea 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4337,7 +4337,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
buf, maddr + offset, bytes);
}
kunmap(page);
- put_page(page);
+ put_user_page(page);
}
len -= bytes;
buf += bytes;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 28/34] mm/madvise.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (26 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 27/34] mm/memory.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 29/34] mm/process_vm_access.c: " john.hubbard
` (5 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, Matthew Wilcox, sparclinux, Ira Weiny, ceph-devel,
devel, rds-devel, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, devel, linux-media, Daniel Black,
John Hubbard, intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
Mike Kravetz
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Daniel Black <daniel@linux.ibm.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
mm/madvise.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 968df3aa069f..1c6881a761a5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -672,7 +672,7 @@ static int madvise_inject_error(int behavior,
* routine is responsible for pinning the page to prevent it
* from being released back to the page allocator.
*/
- put_page(page);
+ put_user_page(page);
ret = memory_failure(pfn, 0);
if (ret)
return ret;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 29/34] mm/process_vm_access.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (27 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 28/34] mm/madvise.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 30/34] crypt: " john.hubbard
` (4 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Ingo Molnar, Christoph Hellwig,
Jason Gunthorpe, Rashika Kheria, xen-devel, devel, linux-media,
Andrea Arcangeli, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, Mike Rapoport, Mathieu Desnoyers,
linux-rpi-kernel, Jann Horn, Dan Williams, linux-arm-kernel,
linux-nfs, Lorenzo Stoakes, Heiko Carstens, netdev, LKML,
linux-xfs, linux-crypto, Christopher Yeoh, linux-fsdevel,
Al Viro
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christopher Yeoh <cyeoh@au1.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jann Horn <jann@thejh.net>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Rashika Kheria <rashika.kheria@gmail.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
mm/process_vm_access.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 357aa7bef6c0..4d29d54ec93f 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -96,7 +96,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
flags |= FOLL_WRITE;
while (!rc && nr_pages && iov_iter_count(iter)) {
- int pages = min(nr_pages, max_pages_per_loop);
+ int pinned_pages = min(nr_pages, max_pages_per_loop);
int locked = 1;
size_t bytes;
@@ -106,14 +106,15 @@ static int process_vm_rw_single_vec(unsigned long addr,
* current/current->mm
*/
down_read(&mm->mmap_sem);
- pages = get_user_pages_remote(task, mm, pa, pages, flags,
- process_pages, NULL, &locked);
+ pinned_pages = get_user_pages_remote(task, mm, pa, pinned_pages,
+ flags, process_pages, NULL,
+ &locked);
if (locked)
up_read(&mm->mmap_sem);
- if (pages <= 0)
+ if (pinned_pages <= 0)
return -EFAULT;
- bytes = pages * PAGE_SIZE - start_offset;
+ bytes = pinned_pages * PAGE_SIZE - start_offset;
if (bytes > len)
bytes = len;
@@ -122,10 +123,9 @@ static int process_vm_rw_single_vec(unsigned long addr,
vm_write);
len -= bytes;
start_offset = 0;
- nr_pages -= pages;
- pa += pages * PAGE_SIZE;
- while (pages)
- put_page(process_pages[--pages]);
+ nr_pages -= pinned_pages;
+ pa += pinned_pages * PAGE_SIZE;
+ put_user_pages(process_pages, pinned_pages);
}
return rc;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 30/34] crypt: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (28 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 29/34] mm/process_vm_access.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 31/34] fs/nfs: " john.hubbard
` (3 subsequent siblings)
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
Herbert Xu, linux-rdma, x86, amd-gfx, Christoph Hellwig,
Jason Gunthorpe, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
David S . Miller
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: David S. Miller <davem@davemloft.net>
Cc: linux-crypto@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
crypto/af_alg.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 879cf23f7489..edd358ea64da 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -428,10 +428,7 @@ static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
- int i;
-
- for (i = 0; i < sgl->npages; i++)
- put_page(sgl->pages[i]);
+ put_user_pages(sgl->pages, sgl->npages);
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
@@ -668,7 +665,7 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
if (!sg_page(sg))
continue;
- put_page(sg_page(sg));
+ put_user_page(sg_page(sg));
}
sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 31/34] fs/nfs: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (29 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 30/34] crypt: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-05 0:26 ` Calum Mackay
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 32/34] goldfish_pipe: " john.hubbard
` (2 subsequent siblings)
33 siblings, 1 reply; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: Calum Mackay, linux-fbdev, Jan Kara, kvm, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, x86, amd-gfx,
Christoph Hellwig, Jason Gunthorpe, xen-devel, devel,
linux-media, John Hubbard, intel-gfx, linux-block,
Jérôme Glisse, linux-rpi-kernel, Dan Williams,
Trond Myklebust, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel, Anna Schumaker
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Calum Mackay <calum.mackay@oracle.com>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <anna.schumaker@netapp.com>
Cc: linux-nfs@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
fs/nfs/direct.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..c0c1b9f2c069 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -276,13 +276,6 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return nfs_file_direct_write(iocb, iter);
}
-static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
-{
- unsigned int i;
- for (i = 0; i < npages; i++)
- put_page(pages[i]);
-}
-
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq)
{
@@ -512,7 +505,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
pos += req_len;
dreq->bytes_left -= req_len;
}
- nfs_direct_release_pages(pagevec, npages);
+ put_user_pages(pagevec, npages);
kvfree(pagevec);
if (result < 0)
break;
@@ -935,7 +928,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
pos += req_len;
dreq->bytes_left -= req_len;
}
- nfs_direct_release_pages(pagevec, npages);
+ put_user_pages(pagevec, npages);
kvfree(pagevec);
if (result < 0)
break;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* Re: [Xen-devel] [PATCH v2 31/34] fs/nfs: convert put_page() to put_user_page*()
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 31/34] fs/nfs: " john.hubbard
@ 2019-08-05 0:26 ` Calum Mackay
0 siblings, 0 replies; 41+ messages in thread
From: Calum Mackay @ 2019-08-05 0:26 UTC (permalink / raw)
To: john.hubbard, Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, Trond Myklebust, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, linux-fsdevel,
Anna Schumaker
On 04/08/2019 11:49 pm, john.hubbard@gmail.com wrote:
> From: John Hubbard <jhubbard@nvidia.com>
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> Cc: Calum Mackay <calum.mackay@oracle.com>
> Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
> Cc: Anna Schumaker <anna.schumaker@netapp.com>
> Cc: linux-nfs@vger.kernel.org
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>
> ---
> fs/nfs/direct.c | 11 ++---------
> 1 file changed, 2 insertions(+), 9 deletions(-)
Reviewed-by: Calum Mackay <calum.mackay@oracle.com>
> diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
> index 0cb442406168..c0c1b9f2c069 100644
> --- a/fs/nfs/direct.c
> +++ b/fs/nfs/direct.c
> @@ -276,13 +276,6 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
> return nfs_file_direct_write(iocb, iter);
> }
>
> -static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
> -{
> - unsigned int i;
> - for (i = 0; i < npages; i++)
> - put_page(pages[i]);
> -}
> -
> void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
> struct nfs_direct_req *dreq)
> {
> @@ -512,7 +505,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
> pos += req_len;
> dreq->bytes_left -= req_len;
> }
> - nfs_direct_release_pages(pagevec, npages);
> + put_user_pages(pagevec, npages);
> kvfree(pagevec);
> if (result < 0)
> break;
> @@ -935,7 +928,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
> pos += req_len;
> dreq->bytes_left -= req_len;
> }
> - nfs_direct_release_pages(pagevec, npages);
> + put_user_pages(pagevec, npages);
> kvfree(pagevec);
> if (result < 0)
> break;
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 32/34] goldfish_pipe: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (30 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 31/34] fs/nfs: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 33/34] kernel/events/core.c: " john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 34/34] fs/binfmt_elf: " john.hubbard
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
Roman Kiryanov, xen-devel, devel, linux-media, John Hubbard,
intel-gfx, linux-block, Jérôme Glisse,
linux-rpi-kernel, Dan Williams, linux-arm-kernel, linux-nfs,
netdev, LKML, linux-xfs, linux-crypto, Greg Kroah-Hartman,
linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Note that this effectively changes the code's behavior in
release_user_pages(): it now ultimately calls set_page_dirty_lock(),
instead of set_page_dirty(). This is probably more accurate.
As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]
[1] https://lore.kernel.org/r/20190723153640.GB720@lst.de
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Roman Kiryanov <rkir@google.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
drivers/platform/goldfish/goldfish_pipe.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index cef0133aa47a..2bd21020e288 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -288,15 +288,12 @@ static int pin_user_pages(unsigned long first_page,
static void release_user_pages(struct page **pages, int pages_count,
int is_write, s32 consumed_size)
{
- int i;
+ bool dirty = !is_write && consumed_size > 0;
- for (i = 0; i < pages_count; i++) {
- if (!is_write && consumed_size > 0)
- set_page_dirty(pages[i]);
- put_page(pages[i]);
- }
+ put_user_pages_dirty_lock(pages, pages_count, dirty);
}
+
/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
int pages_count,
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 33/34] kernel/events/core.c: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (31 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 32/34] goldfish_pipe: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 34/34] fs/binfmt_elf: " john.hubbard
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Peter Zijlstra, Dave Hansen,
Dave Chinner, dri-devel, linux-mm, sparclinux, Ira Weiny,
ceph-devel, devel, rds-devel, linux-rdma, Jiri Olsa, x86,
amd-gfx, Christoph Hellwig, Jason Gunthorpe, Ingo Molnar,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
Arnaldo Carvalho de Melo, linux-block, Jérôme Glisse,
linux-rpi-kernel, Namhyung Kim, Dan Williams, linux-arm-kernel,
linux-nfs, netdev, LKML, Alexander Shishkin, linux-xfs,
linux-crypto, linux-fsdevel
From: John Hubbard <jhubbard@nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
kernel/events/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0463c1151bae..7be52bbbfe87 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6426,7 +6426,7 @@ static u64 perf_virt_to_phys(u64 virt)
phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
if (p)
- put_page(p);
+ put_user_page(p);
}
return phys_addr;
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread
* [Xen-devel] [PATCH v2 34/34] fs/binfmt_elf: convert put_page() to put_user_page*()
2019-08-04 22:48 [Xen-devel] [PATCH v2 00/34] put_user_pages(): miscellaneous call sites john.hubbard
` (32 preceding siblings ...)
2019-08-04 22:49 ` [Xen-devel] [PATCH v2 33/34] kernel/events/core.c: " john.hubbard
@ 2019-08-04 22:49 ` john.hubbard
33 siblings, 0 replies; 41+ messages in thread
From: john.hubbard @ 2019-08-04 22:49 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-fbdev, Jan Kara, kvm, Dave Hansen, Dave Chinner, dri-devel,
linux-mm, sparclinux, Ira Weiny, ceph-devel, devel, rds-devel,
linux-rdma, x86, amd-gfx, Christoph Hellwig, Jason Gunthorpe,
xen-devel, devel, linux-media, John Hubbard, intel-gfx,
linux-block, Jérôme Glisse, linux-rpi-kernel,
Dan Williams, linux-arm-kernel, linux-nfs, netdev, LKML,
linux-xfs, linux-crypto, linux-fsdevel
From: Ira Weiny <ira.weiny@intel.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
get_dump_page() calls get_user_pages(), so put_user_page() must be used
to match.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
fs/binfmt_elf.c | 2 +-
fs/binfmt_elf_fdpic.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d4e11b2e04f6..92e4a5ca99d8 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2377,7 +2377,7 @@ static int elf_core_dump(struct coredump_params *cprm)
void *kaddr = kmap(page);
stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
kunmap(page);
- put_page(page);
+ put_user_page(page);
} else
stop = !dump_skip(cprm, PAGE_SIZE);
if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d86ebd0dcc3d..321724b3be22 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1511,7 +1511,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
void *kaddr = kmap(page);
res = dump_emit(cprm, kaddr, PAGE_SIZE);
kunmap(page);
- put_page(page);
+ put_user_page(page);
} else {
res = dump_skip(cprm, PAGE_SIZE);
}
--
2.22.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
^ permalink raw reply related [flat|nested] 41+ messages in thread