All of lore.kernel.org
diff for duplicates of <20171205154453.GD28760@bombadil.infradead.org>

diff --git a/a/1.txt b/N1/1.txt
index 54a19c8..9d60cfa 100644
--- a/a/1.txt
+++ b/N1/1.txt
@@ -1,287 +1,4 @@
 v2:
  - Fix inverted mask in dax.c
 - Pass 'false' instead of '0' for 'even_cows'
- - nommu definition
-
-From ceee2e58548a5264b61000c02371956a1da3bee4 Mon Sep 17 00:00:00 2001
-From: Matthew Wilcox <mawilcox@microsoft.com>
-Date: Tue, 5 Dec 2017 00:15:54 -0500
-Subject: [PATCH] mm: Add unmap_mapping_pages
-
-Several users of unmap_mapping_range() would much prefer to express
-their range in pages rather than bytes.  Unfortunately, on a 32-bit
-kernel, you have to remember to cast your page number to a 64-bit type
-before shifting it, and four places in the current tree didn't remember
-to do that.  That's a sign of a bad interface.
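-
-A minimal sketch of the overflow described above (illustrative values only;
-assumes a 32-bit kernel where pgoff_t is a 32-bit unsigned long and loff_t
-is 64-bit):
-
-	pgoff_t index = 0x200000;                   /* page index for byte offset 8GiB */
-	loff_t bad  = index << PAGE_SHIFT;          /* shift done in 32 bits: wraps to 0 */
-	loff_t good = (loff_t)index << PAGE_SHIFT;  /* widen first, then shift: 8GiB */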
-
-Conveniently, unmap_mapping_range() actually converts from bytes into
-pages, so hoist the guts of unmap_mapping_range() into the new function
-unmap_mapping_pages() and convert the callers which want to use pages.
-
-Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
-Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
----
- fs/dax.c           | 19 ++++++-------------
- include/linux/mm.h | 14 ++++++++++----
- mm/khugepaged.c    |  3 +--
- mm/memory.c        | 43 +++++++++++++++++++++++++++++++------------
- mm/nommu.c         |  7 -------
- mm/truncate.c      | 23 +++++++----------------
- 6 files changed, 55 insertions(+), 54 deletions(-)
-
-diff --git a/fs/dax.c b/fs/dax.c
-index 95981591977a..d0dc0278f067 100644
---- a/fs/dax.c
-+++ b/fs/dax.c
-@@ -44,6 +44,7 @@
- 
- /* The 'colour' (ie low bits) within a PMD of a page offset.  */
- #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-+#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
- 
- static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
- 
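-As a sanity check on the new macro (my arithmetic, assuming the common
-4KiB page / 2MiB PMD configuration; not part of the patch):
-
-	PG_PMD_NR     == PMD_SIZE >> PAGE_SHIFT        == 512  /* pages per PMD */
-	PG_PMD_COLOUR == (PMD_SIZE >> PAGE_SHIFT) - 1  == 511  /* page's offset within its PMD */
-	/* so (index & ~PG_PMD_COLOUR) rounds a page index down to a PMD
-	 * boundary, replacing the byte-based ((index << PAGE_SHIFT) & PMD_MASK) */
-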
-@@ -375,8 +376,8 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
- 		 * unmapped.
- 		 */
- 		if (pmd_downgrade && dax_is_zero_entry(entry))
--			unmap_mapping_range(mapping,
--				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
-+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
-+							PG_PMD_NR, false);
- 
- 		err = radix_tree_preload(
- 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
- 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
- 		/* we are replacing a zero page with block mapping */
- 		if (dax_is_pmd_entry(entry))
--			unmap_mapping_range(mapping,
--					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
--					PMD_SIZE, 0);
-+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
-+							PG_PMD_NR, false);
- 		else /* pte entry */
--			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
--					PAGE_SIZE, 0);
-+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
- 	}
- 
- 	spin_lock_irq(&mapping->tree_lock);
-@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
- }
- 
- #ifdef CONFIG_FS_DAX_PMD
--/*
-- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
-- * more often than one might expect in the below functions.
-- */
--#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
--
- static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
- 		void *entry)
- {
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index ee073146aaa7..0359ee709434 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -1311,8 +1311,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
- 		unsigned long end, unsigned long floor, unsigned long ceiling);
- int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- 			struct vm_area_struct *vma);
--void unmap_mapping_range(struct address_space *mapping,
--		loff_t const holebegin, loff_t const holelen, int even_cows);
- int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
- 			     unsigned long *start, unsigned long *end,
- 			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
-@@ -1343,6 +1341,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
- extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
- 			    unsigned long address, unsigned int fault_flags,
- 			    bool *unlocked);
-+void unmap_mapping_pages(struct address_space *mapping,
-+		pgoff_t start, pgoff_t nr, bool even_cows);
-+void unmap_mapping_range(struct address_space *mapping,
-+		loff_t const holebegin, loff_t const holelen, int even_cows);
- #else
- static inline int handle_mm_fault(struct vm_area_struct *vma,
- 		unsigned long address, unsigned int flags)
-@@ -1359,10 +1361,14 @@ static inline int fixup_user_fault(struct task_struct *tsk,
- 	BUG();
- 	return -EFAULT;
- }
-+static inline void unmap_mapping_pages(struct address_space *mapping,
-+		pgoff_t start, pgoff_t nr, bool even_cows) { }
-+static inline void unmap_mapping_range(struct address_space *mapping,
-+		loff_t const holebegin, loff_t const holelen, int even_cows) { }
- #endif
- 
--extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
--		unsigned int gup_flags);
-+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
-+		void *buf, int len, unsigned int gup_flags);
- extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- 		void *buf, int len, unsigned int gup_flags);
- extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-diff --git a/mm/khugepaged.c b/mm/khugepaged.c
-index ea4ff259b671..1cd18e4347fe 100644
---- a/mm/khugepaged.c
-+++ b/mm/khugepaged.c
-@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
- 		}
- 
- 		if (page_mapped(page))
--			unmap_mapping_range(mapping, index << PAGE_SHIFT,
--					PAGE_SIZE, 0);
-+			unmap_mapping_pages(mapping, index, 1, false);
- 
- 		spin_lock_irq(&mapping->tree_lock);
- 
-diff --git a/mm/memory.c b/mm/memory.c
-index 85e7a87da79f..1b783faffaec 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -2791,9 +2791,38 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
- 	}
- }
- 
-+/**
-+ * unmap_mapping_pages() - Unmap pages from processes.
-+ * @mapping: The address space containing pages to be unmapped.
-+ * @start: Index of first page to be unmapped.
-+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
-+ * @even_cows: Whether to unmap even private COWed pages.
-+ *
-+ * Unmap the pages in this address space from any userspace process which
-+ * has them mmaped.  Generally, you want to remove COWed pages as well when
-+ * a file is being truncated, but not when invalidating pages from the page
-+ * cache.
-+ */
-+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
-+		pgoff_t nr, bool even_cows)
-+{
-+	struct zap_details details = { };
-+
-+	details.check_mapping = even_cows ? NULL : mapping;
-+	details.first_index = start;
-+	details.last_index = start + nr - 1;
-+	if (details.last_index < details.first_index)
-+		details.last_index = ULONG_MAX;
-+
-+	i_mmap_lock_write(mapping);
-+	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-+	i_mmap_unlock_write(mapping);
-+}
-+
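-A hedged usage sketch of the new interface, following the kernel-doc above
-(hypothetical call sites, not from the patch):
-
-	/* Unmap a single page, leaving private COWed copies in place: */
-	unmap_mapping_pages(mapping, index, 1, false);
-
-	/* Unmap from 'start' to end of file (nr == 0), including COWed
-	 * pages, as a truncation-style caller would want: */
-	unmap_mapping_pages(mapping, start, 0, true);
-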
- /**
-  * unmap_mapping_range - unmap the portion of all mmaps in the specified
-- * address_space corresponding to the specified page range in the underlying
-+ * address_space corresponding to the specified byte range in the underlying
-  * file.
-  *
-  * @mapping: the address space containing mmaps to be unmapped.
-@@ -2811,7 +2840,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
- void unmap_mapping_range(struct address_space *mapping,
- 		loff_t const holebegin, loff_t const holelen, int even_cows)
- {
--	struct zap_details details = { };
- 	pgoff_t hba = holebegin >> PAGE_SHIFT;
- 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
- 
-@@ -2823,16 +2851,7 @@ void unmap_mapping_range(struct address_space *mapping,
- 			hlen = ULONG_MAX - hba + 1;
- 	}
- 
--	details.check_mapping = even_cows ? NULL : mapping;
--	details.first_index = hba;
--	details.last_index = hba + hlen - 1;
--	if (details.last_index < details.first_index)
--		details.last_index = ULONG_MAX;
--
--	i_mmap_lock_write(mapping);
--	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
--		unmap_mapping_range_tree(&mapping->i_mmap, &details);
--	i_mmap_unlock_write(mapping);
-+	unmap_mapping_pages(mapping, hba, hlen, even_cows);
- }
- EXPORT_SYMBOL(unmap_mapping_range);
- 
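-A worked example of the byte-to-page conversion above (my numbers, assuming
-PAGE_SIZE == 4096):
-
-	/* holebegin = 4096, holelen = 5000:
-	 *   hba  = 4096 >> PAGE_SHIFT          == 1
-	 *   hlen = (5000 + 4095) >> PAGE_SHIFT == 2   (a partial page rounds up)
-	 * so the body reduces to unmap_mapping_pages(mapping, 1, 2, even_cows).
-	 */
-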
-diff --git a/mm/nommu.c b/mm/nommu.c
-index 17c00d93de2e..4b9864b17cb0 100644
---- a/mm/nommu.c
-+++ b/mm/nommu.c
-@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
- 	return -ENOMEM;
- }
- 
--void unmap_mapping_range(struct address_space *mapping,
--			 loff_t const holebegin, loff_t const holelen,
--			 int even_cows)
--{
--}
--EXPORT_SYMBOL(unmap_mapping_range);
--
- int filemap_fault(struct vm_fault *vmf)
- {
- 	BUG();
-diff --git a/mm/truncate.c b/mm/truncate.c
-index e4b4cf0f4070..c34e2fd4f583 100644
---- a/mm/truncate.c
-+++ b/mm/truncate.c
-@@ -179,12 +179,8 @@ static void
- truncate_cleanup_page(struct address_space *mapping, struct page *page)
- {
- 	if (page_mapped(page)) {
--		loff_t holelen;
--
--		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
--		unmap_mapping_range(mapping,
--				   (loff_t)page->index << PAGE_SHIFT,
--				   holelen, 0);
-+		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
-+		unmap_mapping_pages(mapping, page->index, nr, false);
- 	}
- 
- 	if (page_has_private(page))
-@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
- 					/*
- 					 * Zap the rest of the file in one hit.
- 					 */
--					unmap_mapping_range(mapping,
--					   (loff_t)index << PAGE_SHIFT,
--					   (loff_t)(1 + end - index)
--							 << PAGE_SHIFT,
--							 0);
-+					unmap_mapping_pages(mapping, index,
-+						(1 + end - index), false);
- 					did_range_unmap = 1;
- 				} else {
- 					/*
- 					 * Just zap this page
- 					 */
--					unmap_mapping_range(mapping,
--					   (loff_t)index << PAGE_SHIFT,
--					   PAGE_SIZE, 0);
-+					unmap_mapping_pages(mapping, index,
-+								1, false);
- 				}
- 			}
- 			BUG_ON(page_mapped(page));
-@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
- 	 * get remapped later.
- 	 */
- 	if (dax_mapping(mapping)) {
--		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
--				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
-+		unmap_mapping_pages(mapping, start, end - start + 1, false);
- 	}
- out:
- 	cleancache_invalidate_inode(mapping);
--- 
-2.15.0
-
---
-To unsubscribe, send a message with 'unsubscribe linux-mm' in
-the body to majordomo@kvack.org.  For more info on Linux MM,
-see: http://www.linux-mm.org/ .
-Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
\ No newline at end of file
+ - nommu definition
\ No newline at end of file
