linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/4] mm: Support THPs in zero_user_segments
@ 2020-11-24  4:15 Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 2/4] fix mm-filemap-add-helper-for-finding-pages.patch Matthew Wilcox (Oracle)
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2020-11-24  4:15 UTC (permalink / raw)
  To: linux-mm, akpm; +Cc: Matthew Wilcox (Oracle)

We can only kmap() one subpage of a THP at a time, so loop over all
relevant subpages, skipping ones which don't need to be zeroed.  The
resulting loop is too large to inline when THPs are enabled and we
actually need highmem, so move the out-of-line version to mm/highmem.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/highmem.h | 19 ++++++++++---
 mm/highmem.c            | 59 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 4 deletions(-)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7d098bd621f6..f20916707ff9 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -193,13 +193,22 @@ static inline void clear_highpage(struct page *page)
 	kunmap_atomic(kaddr);
 }
 
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+		unsigned start2, unsigned end2);
+#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
 static inline void zero_user_segments(struct page *page,
-	unsigned start1, unsigned end1,
-	unsigned start2, unsigned end2)
+		unsigned start1, unsigned end1,
+		unsigned start2, unsigned end2)
 {
 	void *kaddr = kmap_atomic(page);
+	unsigned int i;
 
-	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+	BUG_ON(end1 > page_size(page) || end2 > page_size(page));
 
 	if (end1 > start1)
 		memset(kaddr + start1, 0, end1 - start1);
@@ -208,8 +217,10 @@ static inline void zero_user_segments(struct page *page,
 		memset(kaddr + start2, 0, end2 - start2);
 
 	kunmap_atomic(kaddr);
-	flush_dcache_page(page);
+	for (i = 0; i < compound_nr(page); i++)
+		flush_dcache_page(page + i);
 }
+#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
 
 static inline void zero_user_segment(struct page *page,
 	unsigned start, unsigned end)
diff --git a/mm/highmem.c b/mm/highmem.c
index 54bd233846c9..3e1087f2b735 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,6 +359,65 @@ void kunmap_high(struct page *page)
 		wake_up(pkmap_map_wait);
 }
 EXPORT_SYMBOL(kunmap_high);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+		unsigned start2, unsigned end2)
+{
+	unsigned int i;
+
+	BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+
+	for (i = 0; i < compound_nr(page); i++) {
+		void *kaddr;
+		unsigned this_end;
+
+		if (end1 == 0 && start2 >= PAGE_SIZE) {
+			start2 -= PAGE_SIZE;
+			end2 -= PAGE_SIZE;
+			continue;
+		}
+
+		if (start1 >= PAGE_SIZE) {
+			start1 -= PAGE_SIZE;
+			end1 -= PAGE_SIZE;
+			if (start2) {
+				start2 -= PAGE_SIZE;
+				end2 -= PAGE_SIZE;
+			}
+			continue;
+		}
+
+		kaddr = kmap_atomic(page + i);
+
+		this_end = min_t(unsigned, end1, PAGE_SIZE);
+		if (end1 > start1)
+			memset(kaddr + start1, 0, this_end - start1);
+		end1 -= this_end;
+		start1 = 0;
+
+		if (start2 >= PAGE_SIZE) {
+			start2 -= PAGE_SIZE;
+			end2 -= PAGE_SIZE;
+		} else {
+			this_end = min_t(unsigned, end2, PAGE_SIZE);
+			if (end2 > start2)
+				memset(kaddr + start2, 0, this_end - start2);
+			end2 -= this_end;
+			start2 = 0;
+		}
+
+		kunmap_atomic(kaddr);
+		flush_dcache_page(page + i);
+
+		if (!end1 && !end2)
+			break;
+	}
+
+	BUG_ON((start1 | start2 | end1 | end2) != 0);
+}
+EXPORT_SYMBOL(zero_user_segments);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
-- 
2.29.2



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 2/4] fix mm-filemap-add-helper-for-finding-pages.patch
  2020-11-24  4:15 [PATCH 1/4] mm: Support THPs in zero_user_segments Matthew Wilcox (Oracle)
@ 2020-11-24  4:15 ` Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 3/4] fix mm-add-and-use-find_lock_entries.patch Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 4/4] fix mm-filemap-add-mapping_seek_hole_data.patch Matthew Wilcox (Oracle)
  2 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2020-11-24  4:15 UTC (permalink / raw)
  To: linux-mm, akpm; +Cc: Matthew Wilcox (Oracle)

---
 mm/filemap.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 7c83ee17f756..f3b448431bd4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1858,7 +1858,6 @@ static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
 		put_page(page);
 		goto reset;
 	}
-	VM_BUG_ON_PAGE(!thp_contains(page, xas->xa_index), page);
 
 	return page;
 reset:
-- 
2.29.2



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 3/4] fix mm-add-and-use-find_lock_entries.patch
  2020-11-24  4:15 [PATCH 1/4] mm: Support THPs in zero_user_segments Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 2/4] fix mm-filemap-add-helper-for-finding-pages.patch Matthew Wilcox (Oracle)
@ 2020-11-24  4:15 ` Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 4/4] fix mm-filemap-add-mapping_seek_hole_data.patch Matthew Wilcox (Oracle)
  2 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2020-11-24  4:15 UTC (permalink / raw)
  To: linux-mm, akpm; +Cc: Matthew Wilcox (Oracle)

---
 mm/filemap.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mm/filemap.c b/mm/filemap.c
index f3b448431bd4..7dfd7024d361 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1941,6 +1941,8 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 				goto put;
 			if (page->mapping != mapping || PageWriteback(page))
 				goto unlock;
+			VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index),
+					page);
 		}
 		indices[pvec->nr] = xas.xa_index;
 		if (!pagevec_add(pvec, page))
-- 
2.29.2



^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 4/4] fix mm-filemap-add-mapping_seek_hole_data.patch
  2020-11-24  4:15 [PATCH 1/4] mm: Support THPs in zero_user_segments Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 2/4] fix mm-filemap-add-helper-for-finding-pages.patch Matthew Wilcox (Oracle)
  2020-11-24  4:15 ` [PATCH 3/4] fix mm-add-and-use-find_lock_entries.patch Matthew Wilcox (Oracle)
@ 2020-11-24  4:15 ` Matthew Wilcox (Oracle)
  2 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox (Oracle) @ 2020-11-24  4:15 UTC (permalink / raw)
  To: linux-mm, akpm; +Cc: Matthew Wilcox (Oracle)

---
 mm/filemap.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 7dfd7024d361..a2150a6c15e4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2706,7 +2706,8 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 				seek_data);
 		if (start < pos)
 			goto unlock;
-		put_page(page);
+		if (!xa_is_value(page))
+			put_page(page);
 	}
 	rcu_read_unlock();
 
-- 
2.29.2



^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2020-11-24  4:15 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-24  4:15 [PATCH 1/4] mm: Support THPs in zero_user_segments Matthew Wilcox (Oracle)
2020-11-24  4:15 ` [PATCH 2/4] fix mm-filemap-add-helper-for-finding-pages.patch Matthew Wilcox (Oracle)
2020-11-24  4:15 ` [PATCH 3/4] fix mm-add-and-use-find_lock_entries.patch Matthew Wilcox (Oracle)
2020-11-24  4:15 ` [PATCH 4/4] fix mm-filemap-add-mapping_seek_hole_data.patch Matthew Wilcox (Oracle)

This is a public inbox, see mirroring instructions
on how to clone and mirror all data and code used for this inbox