linux-fsdevel.vger.kernel.org archive mirror
* [PATCH v2] mm: Add unmap_mapping_pages
@ 2017-12-05 15:44 Matthew Wilcox
  2017-12-05 23:01 ` Ross Zwisler
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Matthew Wilcox @ 2017-12-05 15:44 UTC (permalink / raw)
  To: linux-mm; +Cc: zhangyi (F), linux-fsdevel

v2:
 - Fix inverted mask in dax.c
 - Pass 'false' instead of '0' for 'even_cows'
 - nommu definition
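
For reference, the inverted mask was presumably of this shape (an
illustrative reconstruction, not the actual v1 code):

  /* wrong: keeps only the colour bits, so the unmap starts at the
   * page's offset within the PMD instead of at the PMD's first page */
  unmap_mapping_pages(mapping, index & PG_PMD_COLOUR, PG_PMD_NR, false);

  /* right: clears the colour bits, rounding index down to the first
   * page of the PMD */
  unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, PG_PMD_NR, false);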

From ceee2e58548a5264b61000c02371956a1da3bee4 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <mawilcox@microsoft.com>
Date: Tue, 5 Dec 2017 00:15:54 -0500
Subject: [PATCH] mm: Add unmap_mapping_pages

Several users of unmap_mapping_range() would much prefer to express
their range in pages rather than bytes.  Unfortunately, on a 32-bit
kernel, you have to remember to cast your page number to a 64-bit type
before shifting it, and four places in the current tree didn't remember
to do that.  That's a sign of a bad interface.

Conveniently, unmap_mapping_range() actually converts from bytes into
pages, so hoist the guts of unmap_mapping_range() into the new function
unmap_mapping_pages() and convert the callers which want to use pages.
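
To illustrate the class of bug (a sketch, not one of the four call
sites verbatim):

  pgoff_t index;	/* 32 bits wide on a 32-bit kernel */

  /* buggy: the shift is done in 32 bits and truncates for any
   * index >= 1M with 4kB pages, ie file offsets of 4GB and up */
  unmap_mapping_range(mapping, index << PAGE_SHIFT, PAGE_SIZE, 0);

  /* correct, but easy to forget: widen before shifting */
  unmap_mapping_range(mapping, (loff_t)index << PAGE_SHIFT, PAGE_SIZE, 0);

  /* new interface: nothing to forget */
  unmap_mapping_pages(mapping, index, 1, false);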

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
---
 fs/dax.c           | 19 ++++++-------------
 include/linux/mm.h | 14 ++++++++++----
 mm/khugepaged.c    |  3 +--
 mm/memory.c        | 43 +++++++++++++++++++++++++++++++------------
 mm/nommu.c         |  7 -------
 mm/truncate.c      | 23 +++++++----------------
 6 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 95981591977a..d0dc0278f067 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -44,6 +44,7 @@
 
 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -375,8 +376,8 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		 * unmapped.
 		 */
 		if (pmd_downgrade && dax_is_zero_entry(entry))
-			unmap_mapping_range(mapping,
-				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 
 		err = radix_tree_preload(
 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_range(mapping,
-					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-					PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ee073146aaa7..0359ee709434 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1311,8 +1311,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 			     unsigned long *start, unsigned long *end,
 			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1343,6 +1341,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline int handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -1359,10 +1361,14 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 	BUG();
 	return -EFAULT;
 }
+static inline void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows) { }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
-		unsigned int gup_flags);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ea4ff259b671..1cd18e4347fe 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		}
 
 		if (page_mapped(page))
-			unmap_mapping_range(mapping, index << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, index, 1, false);
 
 		spin_lock_irq(&mapping->tree_lock);
 
diff --git a/mm/memory.c b/mm/memory.c
index 85e7a87da79f..1b783faffaec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2791,9 +2791,38 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 	}
 }
 
+/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of first page to be unmapped.
+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmapped.  Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+		pgoff_t nr, bool even_cows)
+{
+	struct zap_details details = { };
+
+	details.check_mapping = even_cows ? NULL : mapping;
+	details.first_index = start;
+	details.last_index = start + nr - 1;
+	if (details.last_index < details.first_index)
+		details.last_index = ULONG_MAX;
+
+	i_mmap_lock_write(mapping);
+	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
+	i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
  * file.
  *
  * @mapping: the address space containing mmaps to be unmapped.
@@ -2811,7 +2840,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	struct zap_details details = { };
 	pgoff_t hba = holebegin >> PAGE_SHIFT;
 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
@@ -2823,16 +2851,7 @@ void unmap_mapping_range(struct address_space *mapping,
 			hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows ? NULL : mapping;
-	details.first_index = hba;
-	details.last_index = hba + hlen - 1;
-	if (details.last_index < details.first_index)
-		details.last_index = ULONG_MAX;
-
-	i_mmap_lock_write(mapping);
-	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	i_mmap_unlock_write(mapping);
+	unmap_mapping_pages(mapping, hba, hlen, even_cows);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 17c00d93de2e..4b9864b17cb0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void unmap_mapping_range(struct address_space *mapping,
-			 loff_t const holebegin, loff_t const holelen,
-			 int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
 int filemap_fault(struct vm_fault *vmf)
 {
 	BUG();
diff --git a/mm/truncate.c b/mm/truncate.c
index e4b4cf0f4070..c34e2fd4f583 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -179,12 +179,8 @@ static void
 truncate_cleanup_page(struct address_space *mapping, struct page *page)
 {
 	if (page_mapped(page)) {
-		loff_t holelen;
-
-		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
-		unmap_mapping_range(mapping,
-				   (loff_t)page->index << PAGE_SHIFT,
-				   holelen, 0);
+		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+		unmap_mapping_pages(mapping, page->index, nr, false);
 	}
 
 	if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					/*
 					 * Zap the rest of the file in one hit.
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   (loff_t)(1 + end - index)
-							 << PAGE_SHIFT,
-							 0);
+					unmap_mapping_pages(mapping, index,
+						(1 + end - index), false);
 					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   PAGE_SIZE, 0);
+					unmap_mapping_pages(mapping, index,
+								1, false);
 				}
 			}
 			BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	 * get remapped later.
 	 */
 	if (dax_mapping(mapping)) {
-		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
-				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+		unmap_mapping_pages(mapping, start, end - start + 1, false);
 	}
 out:
 	cleancache_invalidate_inode(mapping);
-- 
2.15.0


* Re: [PATCH v2] mm: Add unmap_mapping_pages
  2017-12-05 15:44 [PATCH v2] mm: Add unmap_mapping_pages Matthew Wilcox
@ 2017-12-05 23:01 ` Ross Zwisler
  2017-12-06 14:26 ` [PATCH v3] " Matthew Wilcox
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 9+ messages in thread
From: Ross Zwisler @ 2017-12-05 23:01 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, zhangyi (F), linux-fsdevel

On Tue, Dec 05, 2017 at 07:44:53AM -0800, Matthew Wilcox wrote:
> v2:
>  - Fix inverted mask in dax.c
>  - Pass 'false' instead of '0' for 'even_cows'
>  - nommu definition
> 
> From ceee2e58548a5264b61000c02371956a1da3bee4 Mon Sep 17 00:00:00 2001
> From: Matthew Wilcox <mawilcox@microsoft.com>
> Date: Tue, 5 Dec 2017 00:15:54 -0500
> Subject: [PATCH] mm: Add unmap_mapping_pages
> 
> Several users of unmap_mapping_range() would much prefer to express
> their range in pages rather than bytes.  Unfortunately, on a 32-bit
> kernel, you have to remember to cast your page number to a 64-bit type
> before shifting it, and four places in the current tree didn't remember
> to do that.  That's a sign of a bad interface.
> 
> Conveniently, unmap_mapping_range() actually converts from bytes into
> pages, so hoist the guts of unmap_mapping_range() into the new function
> unmap_mapping_pages() and convert the callers which want to use pages.
> 
> Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
> Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>

Looks good.  You can add:

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>


* [PATCH v3] mm: Add unmap_mapping_pages
  2017-12-05 15:44 [PATCH v2] mm: Add unmap_mapping_pages Matthew Wilcox
  2017-12-05 23:01 ` Ross Zwisler
@ 2017-12-06 14:26 ` Matthew Wilcox
  2017-12-06 18:26   ` Ross Zwisler
  2017-12-10 10:55   ` Kirill A. Shutemov
  2017-12-08  2:10 ` [PATCH v2] " kbuild test robot
  2017-12-08  2:38 ` kbuild test robot
  3 siblings, 2 replies; 9+ messages in thread
From: Matthew Wilcox @ 2017-12-06 14:26 UTC (permalink / raw)
  To: linux-mm; +Cc: zhangyi (F), linux-fsdevel, Ross Zwisler

v3:
 - Fix compilation
   (I forgot to git commit --amend)
 - Added Ross' Reviewed-by
v2:
 - Fix inverted mask in dax.c
 - Pass 'false' instead of '0' for 'even_cows'
 - nommu definition

--- 8< ---

From df142c51e111f7c386f594d5443530ea17abba5f Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <mawilcox@microsoft.com>
Date: Tue, 5 Dec 2017 00:15:54 -0500
Subject: [PATCH v3] mm: Add unmap_mapping_pages

Several users of unmap_mapping_range() would prefer to express their
range in pages rather than bytes.  Unfortunately, on a 32-bit kernel,
you have to remember to cast your page number to a 64-bit type before
shifting it, and four places in the current tree didn't remember to
do that.  That's a sign of a bad interface.

Conveniently, unmap_mapping_range() actually converts from bytes into
pages, so hoist the guts of unmap_mapping_range() into a new function
unmap_mapping_pages() and convert the callers which want to use pages.
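
One subtlety of the new interface, sketched below with unsigned long
arithmetic (the real code is in the mm/memory.c hunk): passing nr == 0
means "unmap to end of file", and falls out of the wraparound clamp:

  pgoff_t start = 5, nr = 0;
  pgoff_t last = start + nr - 1;	/* wraps, so last < start */

  if (last < start)
  	last = ULONG_MAX;		/* ie unmap through end of file */

The same clamp also caps a range whose end would overflow pgoff_t.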

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
---
 fs/dax.c           | 19 ++++++-------------
 include/linux/mm.h | 26 ++++++++++++++++----------
 mm/khugepaged.c    |  3 +--
 mm/memory.c        | 43 +++++++++++++++++++++++++++++++------------
 mm/nommu.c         |  7 -------
 mm/truncate.c      | 23 +++++++----------------
 6 files changed, 61 insertions(+), 60 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 95981591977a..d0dc0278f067 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -44,6 +44,7 @@
 
 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -375,8 +376,8 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		 * unmapped.
 		 */
 		if (pmd_downgrade && dax_is_zero_entry(entry))
-			unmap_mapping_range(mapping,
-				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 
 		err = radix_tree_preload(
 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_range(mapping,
-					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-					PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ee073146aaa7..283d352c4e11 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1311,8 +1311,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 			     unsigned long *start, unsigned long *end,
 			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1323,12 +1321,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen)
-{
-	unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1343,6 +1335,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline int handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -1359,10 +1355,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 	BUG();
 	return -EFAULT;
 }
+static inline void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows) { }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
-		unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen)
+{
+	unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ea4ff259b671..1cd18e4347fe 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		}
 
 		if (page_mapped(page))
-			unmap_mapping_range(mapping, index << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, index, 1, false);
 
 		spin_lock_irq(&mapping->tree_lock);
 
diff --git a/mm/memory.c b/mm/memory.c
index 85e7a87da79f..1b783faffaec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2791,9 +2791,38 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 	}
 }
 
+/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of first page to be unmapped.
+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmapped.  Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+		pgoff_t nr, bool even_cows)
+{
+	struct zap_details details = { };
+
+	details.check_mapping = even_cows ? NULL : mapping;
+	details.first_index = start;
+	details.last_index = start + nr - 1;
+	if (details.last_index < details.first_index)
+		details.last_index = ULONG_MAX;
+
+	i_mmap_lock_write(mapping);
+	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
+	i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
  * file.
  *
  * @mapping: the address space containing mmaps to be unmapped.
@@ -2811,7 +2840,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	struct zap_details details = { };
 	pgoff_t hba = holebegin >> PAGE_SHIFT;
 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
@@ -2823,16 +2851,7 @@ void unmap_mapping_range(struct address_space *mapping,
 			hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows ? NULL : mapping;
-	details.first_index = hba;
-	details.last_index = hba + hlen - 1;
-	if (details.last_index < details.first_index)
-		details.last_index = ULONG_MAX;
-
-	i_mmap_lock_write(mapping);
-	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	i_mmap_unlock_write(mapping);
+	unmap_mapping_pages(mapping, hba, hlen, even_cows);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 17c00d93de2e..4b9864b17cb0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void unmap_mapping_range(struct address_space *mapping,
-			 loff_t const holebegin, loff_t const holelen,
-			 int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
 int filemap_fault(struct vm_fault *vmf)
 {
 	BUG();
diff --git a/mm/truncate.c b/mm/truncate.c
index e4b4cf0f4070..c34e2fd4f583 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -179,12 +179,8 @@ static void
 truncate_cleanup_page(struct address_space *mapping, struct page *page)
 {
 	if (page_mapped(page)) {
-		loff_t holelen;
-
-		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
-		unmap_mapping_range(mapping,
-				   (loff_t)page->index << PAGE_SHIFT,
-				   holelen, 0);
+		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+		unmap_mapping_pages(mapping, page->index, nr, false);
 	}
 
 	if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					/*
 					 * Zap the rest of the file in one hit.
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   (loff_t)(1 + end - index)
-							 << PAGE_SHIFT,
-							 0);
+					unmap_mapping_pages(mapping, index,
+						(1 + end - index), false);
 					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   PAGE_SIZE, 0);
+					unmap_mapping_pages(mapping, index,
+								1, false);
 				}
 			}
 			BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	 * get remapped later.
 	 */
 	if (dax_mapping(mapping)) {
-		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
-				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+		unmap_mapping_pages(mapping, start, end - start + 1, false);
 	}
 out:
 	cleancache_invalidate_inode(mapping);
-- 
2.15.0


* Re: [PATCH v3] mm: Add unmap_mapping_pages
  2017-12-06 14:26 ` [PATCH v3] " Matthew Wilcox
@ 2017-12-06 18:26   ` Ross Zwisler
  2017-12-10 10:55   ` Kirill A. Shutemov
  1 sibling, 0 replies; 9+ messages in thread
From: Ross Zwisler @ 2017-12-06 18:26 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, zhangyi (F), linux-fsdevel, Ross Zwisler

On Wed, Dec 06, 2017 at 06:26:27AM -0800, Matthew Wilcox wrote:
> v3:
>  - Fix compilation
>    (I forgot to git commit --amend)
>  - Added Ross' Reviewed-by
> v2:
>  - Fix inverted mask in dax.c
>  - Pass 'false' instead of '0' for 'even_cows'
>  - nommu definition
> 
> --- 8< ---
> 
> From df142c51e111f7c386f594d5443530ea17abba5f Mon Sep 17 00:00:00 2001
> From: Matthew Wilcox <mawilcox@microsoft.com>
> Date: Tue, 5 Dec 2017 00:15:54 -0500
> Subject: [PATCH v3] mm: Add unmap_mapping_pages

Just FYI, the above scissors doesn't allow me to apply the patch using git
version 2.14.3:

  $ git am --scissors  ~/patch/out.patch
  Patch is empty.
  When you have resolved this problem, run "git am --continue".
  If you prefer to skip this patch, run "git am --skip" instead.
  To restore the original branch and stop patching, run "git am --abort".

It's mad about the second set of mail headers in the body of your mail, and
tries to separate it into a second patch.

You can get around this either by a) not having the second set of headers
(From:, Date:, Subject:), or b) including the extra info in a separate block
below the --- line, i.e.:

  ...
  Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
  Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
  Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
  ---
  
  v3:
   - Fix compilation
     (I forgot to git commit --amend)
   - Added Ross' Reviewed-by
  v2:
   - Fix inverted mask in dax.c
   - Pass 'false' instead of '0' for 'even_cows'
   - nommu definition
  
  ---
   fs/dax.c           | 19 ++++++-------------
   include/linux/mm.h | 26 ++++++++++++++++----------
  ...

- Ross


* Re: [PATCH v2] mm: Add unmap_mapping_pages
  2017-12-05 15:44 [PATCH v2] mm: Add unmap_mapping_pages Matthew Wilcox
  2017-12-05 23:01 ` Ross Zwisler
  2017-12-06 14:26 ` [PATCH v3] " Matthew Wilcox
@ 2017-12-08  2:10 ` kbuild test robot
  2017-12-08  2:38 ` kbuild test robot
  3 siblings, 0 replies; 9+ messages in thread
From: kbuild test robot @ 2017-12-08  2:10 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: kbuild-all, linux-mm, zhangyi (F), linux-fsdevel

[-- Attachment #1: Type: text/plain, Size: 8840 bytes --]

Hi Matthew,

I love your patch! Yet something to improve:

[auto build test ERROR on mmotm/master]
[also build test ERROR on v4.15-rc2 next-20171207]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Matthew-Wilcox/mm-Add-unmap_mapping_pages/20171208-072634
base:   git://git.cmpxchg.org/linux-mmotm.git master
config: i386-tinyconfig (attached as .config)
compiler: gcc-7 (Debian 7.2.0-12) 7.2.1 20171025
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All error/warnings (new ones prefixed by >>):

   In file included from include/linux/memcontrol.h:29:0,
                    from include/linux/swap.h:9,
                    from include/linux/suspend.h:5,
                    from arch/x86/kernel/asm-offsets.c:13:
   include/linux/mm.h: In function 'unmap_shared_mapping_range':
>> include/linux/mm.h:1328:2: error: implicit declaration of function 'unmap_mapping_range'; did you mean 'unmap_shared_mapping_range'? [-Werror=implicit-function-declaration]
     unmap_mapping_range(mapping, holebegin, holelen, 0);
     ^~~~~~~~~~~~~~~~~~~
     unmap_shared_mapping_range
   include/linux/mm.h: At top level:
>> include/linux/mm.h:1347:6: warning: conflicting types for 'unmap_mapping_range'
    void unmap_mapping_range(struct address_space *mapping,
         ^~~~~~~~~~~~~~~~~~~
   include/linux/mm.h:1328:2: note: previous implicit declaration of 'unmap_mapping_range' was here
     unmap_mapping_range(mapping, holebegin, holelen, 0);
     ^~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
   make[2]: *** [arch/x86/kernel/asm-offsets.s] Error 1
   make[2]: Target '__build' not remade because of errors.
   make[1]: *** [prepare0] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [sub-make] Error 2
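
In short, this is an ordering problem: unmap_shared_mapping_range() is
a static inline defined above the #ifdef CONFIG_MMU block, while this
patch moved the unmap_mapping_range() prototype inside that block,
below it.  A reduced sketch of the resulting header layout (not the
actual mm.h text):

  static inline void unmap_shared_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen)
  {
  	/* no prototype in scope yet: implicit declaration, taken to
  	 * return int */
  	unmap_mapping_range(mapping, holebegin, holelen, 0);
  }

  /* ...only later, inside #ifdef CONFIG_MMU... */
  void unmap_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen, int even_cows);
  /* "conflicting types" with the earlier implicit int declaration */

The annotated listing below shows the real lines.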

vim +1328 include/linux/mm.h

e6473092bd Matt Mackall                  2008-02-04  1307  
2165009bdf Dave Hansen                   2008-06-12  1308  int walk_page_range(unsigned long addr, unsigned long end,
2165009bdf Dave Hansen                   2008-06-12  1309  		struct mm_walk *walk);
900fc5f197 Naoya Horiguchi               2015-02-11  1310  int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
42b7772812 Jan Beulich                   2008-07-23  1311  void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
3bf5ee9564 Hugh Dickins                  2005-04-19  1312  		unsigned long end, unsigned long floor, unsigned long ceiling);
^1da177e4c Linus Torvalds                2005-04-16  1313  int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
^1da177e4c Linus Torvalds                2005-04-16  1314  			struct vm_area_struct *vma);
0979639595 Ross Zwisler                  2017-01-10  1315  int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
a4d1a88525 Jérôme Glisse                 2017-08-31  1316  			     unsigned long *start, unsigned long *end,
0979639595 Ross Zwisler                  2017-01-10  1317  			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
3b6748e2dd Johannes Weiner               2009-06-16  1318  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3b6748e2dd Johannes Weiner               2009-06-16  1319  	unsigned long *pfn);
d87fe6607c venkatesh.pallipadi@intel.com 2008-12-19  1320  int follow_phys(struct vm_area_struct *vma, unsigned long address,
d87fe6607c venkatesh.pallipadi@intel.com 2008-12-19  1321  		unsigned int flags, unsigned long *prot, resource_size_t *phys);
28b2ee20c7 Rik van Riel                  2008-07-23  1322  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
28b2ee20c7 Rik van Riel                  2008-07-23  1323  			void *buf, int len, int write);
^1da177e4c Linus Torvalds                2005-04-16  1324  
^1da177e4c Linus Torvalds                2005-04-16  1325  static inline void unmap_shared_mapping_range(struct address_space *mapping,
^1da177e4c Linus Torvalds                2005-04-16  1326  		loff_t const holebegin, loff_t const holelen)
^1da177e4c Linus Torvalds                2005-04-16  1327  {
^1da177e4c Linus Torvalds                2005-04-16 @1328  	unmap_mapping_range(mapping, holebegin, holelen, 0);
^1da177e4c Linus Torvalds                2005-04-16  1329  }
^1da177e4c Linus Torvalds                2005-04-16  1330  
7caef26767 Kirill A. Shutemov            2013-09-12  1331  extern void truncate_pagecache(struct inode *inode, loff_t new);
2c27c65ed0 Christoph Hellwig             2010-06-04  1332  extern void truncate_setsize(struct inode *inode, loff_t newsize);
90a8020278 Jan Kara                      2014-10-01  1333  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
623e3db9f9 Hugh Dickins                  2012-03-28  1334  void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
750b4987b0 Nick Piggin                   2009-09-16  1335  int truncate_inode_page(struct address_space *mapping, struct page *page);
2571873621 Andi Kleen                    2009-09-16  1336  int generic_error_remove_page(struct address_space *mapping, struct page *page);
83f786680a Wu Fengguang                  2009-09-16  1337  int invalidate_inode_page(struct page *page);
83f786680a Wu Fengguang                  2009-09-16  1338  
7ee1dd3fee David Howells                 2006-01-06  1339  #ifdef CONFIG_MMU
dcddffd41d Kirill A. Shutemov            2016-07-26  1340  extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
dcddffd41d Kirill A. Shutemov            2016-07-26  1341  		unsigned int flags);
5c723ba5b7 Peter Zijlstra                2011-07-27  1342  extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
4a9e1cda27 Dominik Dingel                2016-01-15  1343  			    unsigned long address, unsigned int fault_flags,
4a9e1cda27 Dominik Dingel                2016-01-15  1344  			    bool *unlocked);
dcd7006c23 Matthew Wilcox                2017-12-05  1345  void unmap_mapping_pages(struct address_space *mapping,
dcd7006c23 Matthew Wilcox                2017-12-05  1346  		pgoff_t start, pgoff_t nr, bool even_cows);
dcd7006c23 Matthew Wilcox                2017-12-05 @1347  void unmap_mapping_range(struct address_space *mapping,
dcd7006c23 Matthew Wilcox                2017-12-05  1348  		loff_t const holebegin, loff_t const holelen, int even_cows);
7ee1dd3fee David Howells                 2006-01-06  1349  #else
dcddffd41d Kirill A. Shutemov            2016-07-26  1350  static inline int handle_mm_fault(struct vm_area_struct *vma,
dcddffd41d Kirill A. Shutemov            2016-07-26  1351  		unsigned long address, unsigned int flags)
7ee1dd3fee David Howells                 2006-01-06  1352  {
7ee1dd3fee David Howells                 2006-01-06  1353  	/* should never happen if there's no MMU */
7ee1dd3fee David Howells                 2006-01-06  1354  	BUG();
7ee1dd3fee David Howells                 2006-01-06  1355  	return VM_FAULT_SIGBUS;
7ee1dd3fee David Howells                 2006-01-06  1356  }
5c723ba5b7 Peter Zijlstra                2011-07-27  1357  static inline int fixup_user_fault(struct task_struct *tsk,
5c723ba5b7 Peter Zijlstra                2011-07-27  1358  		struct mm_struct *mm, unsigned long address,
4a9e1cda27 Dominik Dingel                2016-01-15  1359  		unsigned int fault_flags, bool *unlocked)
5c723ba5b7 Peter Zijlstra                2011-07-27  1360  {
5c723ba5b7 Peter Zijlstra                2011-07-27  1361  	/* should never happen if there's no MMU */
5c723ba5b7 Peter Zijlstra                2011-07-27  1362  	BUG();
5c723ba5b7 Peter Zijlstra                2011-07-27  1363  	return -EFAULT;
5c723ba5b7 Peter Zijlstra                2011-07-27  1364  }
dcd7006c23 Matthew Wilcox                2017-12-05  1365  static inline void unmap_mapping_pages(struct address_space *mapping,
dcd7006c23 Matthew Wilcox                2017-12-05  1366  		pgoff_t start, pgoff_t nr, bool even_cows) { }
dcd7006c23 Matthew Wilcox                2017-12-05  1367  static inline void unmap_mapping_range(struct address_space *mapping,
dcd7006c23 Matthew Wilcox                2017-12-05  1368  		loff_t const holebegin, loff_t const holelen, int even_cows) { }
7ee1dd3fee David Howells                 2006-01-06  1369  #endif
f33ea7f404 Nick Piggin                   2005-08-03  1370  

:::::: The code at line 1328 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2

:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 6795 bytes --]


* Re: [PATCH v2] mm: Add unmap_mapping_pages
  2017-12-05 15:44 [PATCH v2] mm: Add unmap_mapping_pages Matthew Wilcox
                   ` (2 preceding siblings ...)
  2017-12-08  2:10 ` [PATCH v2] " kbuild test robot
@ 2017-12-08  2:38 ` kbuild test robot
  2017-12-09  1:36   ` Matthew Wilcox
  3 siblings, 1 reply; 9+ messages in thread
From: kbuild test robot @ 2017-12-08  2:38 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: kbuild-all, linux-mm, zhangyi (F), linux-fsdevel

[-- Attachment #1: Type: text/plain, Size: 4672 bytes --]

Hi Matthew,

I love your patch! Yet something to improve:

[auto build test ERROR on mmotm/master]
[also build test ERROR on v4.15-rc2 next-20171207]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Matthew-Wilcox/mm-Add-unmap_mapping_pages/20171208-072634
base:   git://git.cmpxchg.org/linux-mmotm.git master
config: i386-randconfig-a0-201749 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.4-2) 4.9.4
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   In file included from include/linux/memcontrol.h:29:0,
                    from include/linux/swap.h:9,
                    from include/linux/suspend.h:5,
                    from arch/x86/kernel/asm-offsets.c:13:
   include/linux/mm.h: In function 'unmap_shared_mapping_range':
>> include/linux/mm.h:1328:2: error: implicit declaration of function 'unmap_mapping_range' [-Werror=implicit-function-declaration]
     unmap_mapping_range(mapping, holebegin, holelen, 0);
     ^
   include/linux/mm.h: At top level:
   include/linux/mm.h:1347:6: warning: conflicting types for 'unmap_mapping_range'
    void unmap_mapping_range(struct address_space *mapping,
         ^
   include/linux/mm.h:1328:2: note: previous implicit declaration of 'unmap_mapping_range' was here
     unmap_mapping_range(mapping, holebegin, holelen, 0);
     ^
   cc1: some warnings being treated as errors
   make[2]: *** [arch/x86/kernel/asm-offsets.s] Error 1
   make[2]: Target '__build' not remade because of errors.
   make[1]: *** [prepare0] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [sub-make] Error 2

vim +/unmap_mapping_range +1328 include/linux/mm.h

e6473092b Matt Mackall                  2008-02-04  1307  
2165009bd Dave Hansen                   2008-06-12  1308  int walk_page_range(unsigned long addr, unsigned long end,
2165009bd Dave Hansen                   2008-06-12  1309  		struct mm_walk *walk);
900fc5f19 Naoya Horiguchi               2015-02-11  1310  int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
42b777281 Jan Beulich                   2008-07-23  1311  void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
3bf5ee956 Hugh Dickins                  2005-04-19  1312  		unsigned long end, unsigned long floor, unsigned long ceiling);
^1da177e4 Linus Torvalds                2005-04-16  1313  int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
^1da177e4 Linus Torvalds                2005-04-16  1314  			struct vm_area_struct *vma);
097963959 Ross Zwisler                  2017-01-10  1315  int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
a4d1a8852 Jérôme Glisse                 2017-08-31  1316  			     unsigned long *start, unsigned long *end,
097963959 Ross Zwisler                  2017-01-10  1317  			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
3b6748e2d Johannes Weiner               2009-06-16  1318  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3b6748e2d Johannes Weiner               2009-06-16  1319  	unsigned long *pfn);
d87fe6607 venkatesh.pallipadi@intel.com 2008-12-19  1320  int follow_phys(struct vm_area_struct *vma, unsigned long address,
d87fe6607 venkatesh.pallipadi@intel.com 2008-12-19  1321  		unsigned int flags, unsigned long *prot, resource_size_t *phys);
28b2ee20c Rik van Riel                  2008-07-23  1322  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
28b2ee20c Rik van Riel                  2008-07-23  1323  			void *buf, int len, int write);
^1da177e4 Linus Torvalds                2005-04-16  1324  
^1da177e4 Linus Torvalds                2005-04-16  1325  static inline void unmap_shared_mapping_range(struct address_space *mapping,
^1da177e4 Linus Torvalds                2005-04-16  1326  		loff_t const holebegin, loff_t const holelen)
^1da177e4 Linus Torvalds                2005-04-16  1327  {
^1da177e4 Linus Torvalds                2005-04-16 @1328  	unmap_mapping_range(mapping, holebegin, holelen, 0);
^1da177e4 Linus Torvalds                2005-04-16  1329  }
^1da177e4 Linus Torvalds                2005-04-16  1330  

:::::: The code at line 1328 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2

:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 29260 bytes --]


* Re: [PATCH v2] mm: Add unmap_mapping_pages
  2017-12-08  2:38 ` kbuild test robot
@ 2017-12-09  1:36   ` Matthew Wilcox
  2017-12-09  2:42     ` Fengguang Wu
  0 siblings, 1 reply; 9+ messages in thread
From: Matthew Wilcox @ 2017-12-09  1:36 UTC (permalink / raw)
  To: kbuild test robot; +Cc: kbuild-all, linux-mm, zhangyi (F), linux-fsdevel

On Fri, Dec 08, 2017 at 10:38:55AM +0800, kbuild test robot wrote:
> Hi Matthew,
> 
> I love your patch! Yet something to improve:

You missed v3, kbuild robot?


* Re: [PATCH v2] mm: Add unmap_mapping_pages
  2017-12-09  1:36   ` Matthew Wilcox
@ 2017-12-09  2:42     ` Fengguang Wu
  0 siblings, 0 replies; 9+ messages in thread
From: Fengguang Wu @ 2017-12-09  2:42 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: kbuild-all, linux-mm, zhangyi (F), linux-fsdevel, Ye Xiaolong

CC Xiaolong.

On Fri, Dec 08, 2017 at 05:36:24PM -0800, Matthew Wilcox wrote:
>On Fri, Dec 08, 2017 at 10:38:55AM +0800, kbuild test robot wrote:
>> Hi Matthew,
>>
>> I love your patch! Yet something to improve:
>
>You missed v3, kbuild robot?

Yeah indeed. Something went wrong and the patch service log has not
been updated for 3 days. Let's check it.

Thanks,
Fengguang


* Re: [PATCH v3] mm: Add unmap_mapping_pages
  2017-12-06 14:26 ` [PATCH v3] " Matthew Wilcox
  2017-12-06 18:26   ` Ross Zwisler
@ 2017-12-10 10:55   ` Kirill A. Shutemov
  1 sibling, 0 replies; 9+ messages in thread
From: Kirill A. Shutemov @ 2017-12-10 10:55 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, zhangyi (F), linux-fsdevel, Ross Zwisler

On Wed, Dec 06, 2017 at 06:26:27AM -0800, Matthew Wilcox wrote:
> v3:
>  - Fix compilation
>    (I forgot to git commit --amend)
>  - Added Ross' Reviewed-by
> v2:
>  - Fix inverted mask in dax.c
>  - Pass 'false' instead of '0' for 'even_cows'
>  - nommu definition
> 
> --- 8< ---
> 
> From df142c51e111f7c386f594d5443530ea17abba5f Mon Sep 17 00:00:00 2001
> From: Matthew Wilcox <mawilcox@microsoft.com>
> Date: Tue, 5 Dec 2017 00:15:54 -0500
> Subject: [PATCH v3] mm: Add unmap_mapping_pages
> 
> Several users of unmap_mapping_range() would prefer to express their
> range in pages rather than bytes.  Unfortunately, on a 32-bit kernel,
> you have to remember to cast your page number to a 64-bit type before
> shifting it, and four places in the current tree didn't remember to
> do that.  That's a sign of a bad interface.
> 
> Conveniently, unmap_mapping_range() actually converts from bytes into
> pages, so hoist the guts of unmap_mapping_range() into a new function
> unmap_mapping_pages() and convert the callers which want to use pages.
> 
> Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
> Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
> Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>

Looks good to me.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


end of thread, newest message: 2017-12-10 10:55 UTC

Thread overview: 9+ messages
2017-12-05 15:44 [PATCH v2] mm: Add unmap_mapping_pages Matthew Wilcox
2017-12-05 23:01 ` Ross Zwisler
2017-12-06 14:26 ` [PATCH v3] " Matthew Wilcox
2017-12-06 18:26   ` Ross Zwisler
2017-12-10 10:55   ` Kirill A. Shutemov
2017-12-08  2:10 ` [PATCH v2] " kbuild test robot
2017-12-08  2:38 ` kbuild test robot
2017-12-09  1:36   ` Matthew Wilcox
2017-12-09  2:42     ` Fengguang Wu
