From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org, Christoph Hellwig <hch@lst.de>,
	Jeff Layton <jlayton@kernel.org>
Subject: [PATCH v9 16/96] mm: Add folio flag manipulation functions
Date: Wed,  5 May 2021 16:05:08 +0100	[thread overview]
Message-ID: <20210505150628.111735-17-willy@infradead.org> (raw)
In-Reply-To: <20210505150628.111735-1-willy@infradead.org>

These new functions are the folio analogues of the various PageFlags
functions.  If CONFIG_DEBUG_VM_PGFLAGS is enabled, we check that the
folio is not a tail page at every invocation.  This also catches the
PagePoisoned case, as a poisoned page has every bit set, which
includes PageTail.
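
For illustration, here is roughly what one of these macro pairs
expands to for PG_unevictable (a hand-written sketch of the generated
functions, not literal preprocessor output):

	static __always_inline bool folio_unevictable(struct folio *folio)
	{
		/* folio_flags() performs the tail/poison checks */
		return test_bit(PG_unevictable,
				folio_flags(folio, FOLIO_PF_HEAD));
	}

	static __always_inline void SetPageUnevictable(struct page *page)
	{
		/* the legacy page API still converts via the policy macro */
		set_bit(PG_unevictable, &PF_HEAD(page, 1)->flags);
	}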

This saves 1727 bytes of text with the distro-derived config that
I'm testing, due to the removal of a double call to compound_head()
in PageSwapCache().
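
The double call came from PageSwapBacked() (a PF_NO_TAIL flag)
calling compound_head() internally, on top of the explicit call in
PageSwapCache() under CONFIG_THP_SWAP.  A simplified before/after
sketch:

	/* before: two head-page conversions per test */
	page = compound_head(page);
	return PageSwapBacked(page) &&		/* converts again inside */
		test_bit(PG_swapcache, &page->flags);

	/* after: page_folio() converts once; folio_swapcache() trusts it */
	return folio_swapcache(page_folio(page));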

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/page-flags.h | 203 +++++++++++++++++++++++++++----------
 1 file changed, 148 insertions(+), 55 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e069aa8b11b7..ef8b7c6dc91c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -140,6 +140,8 @@ enum pageflags {
 #endif
 	__NR_PAGEFLAGS,
 
+	PG_readahead = PG_reclaim,
+
 	/* Filesystems */
 	PG_checked = PG_owner_priv_1,
 
@@ -239,6 +241,15 @@ static inline void page_init_poison(struct page *page, size_t size)
 }
 #endif
 
+static inline unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+	struct page *page = &folio->page;
+
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+	return &page[n].flags;
+}
+
 /*
  * Page flags policies wrt compound pages
  *
@@ -283,34 +294,62 @@ static inline void page_init_poison(struct page *page, size_t size)
 		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
 		PF_POISONED_CHECK(&page[1]); })
 
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY		0
+#define FOLIO_PF_HEAD		0
+#define FOLIO_PF_ONLY_HEAD	0
+#define FOLIO_PF_NO_TAIL	0
+#define FOLIO_PF_NO_COMPOUND	0
+#define FOLIO_PF_SECOND		1
+
 /*
  * Macros to create function definitions for page flags
  */
 #define TESTPAGEFLAG(uname, lname, policy)				\
+static __always_inline bool folio_##lname(struct folio *folio)		\
+{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
 static __always_inline int Page##uname(struct page *page)		\
 	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
 
 #define SETPAGEFLAG(uname, lname, policy)				\
+static __always_inline							\
+void folio_set_##lname##_flag(struct folio *folio)			\
+{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
 static __always_inline void SetPage##uname(struct page *page)		\
 	{ set_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define CLEARPAGEFLAG(uname, lname, policy)				\
+static __always_inline							\
+void folio_clear_##lname##_flag(struct folio *folio)			\
+{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
 static __always_inline void ClearPage##uname(struct page *page)		\
 	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define __SETPAGEFLAG(uname, lname, policy)				\
+static __always_inline							\
+void __folio_set_##lname##_flag(struct folio *folio)			\
+{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
 static __always_inline void __SetPage##uname(struct page *page)		\
 	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define __CLEARPAGEFLAG(uname, lname, policy)				\
+static __always_inline							\
+void __folio_clear_##lname##_flag(struct folio *folio)			\
+{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
 static __always_inline void __ClearPage##uname(struct page *page)	\
 	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define TESTSETFLAG(uname, lname, policy)				\
+static __always_inline							\
+bool folio_test_set_##lname##_flag(struct folio *folio)		\
+{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
 static __always_inline int TestSetPage##uname(struct page *page)	\
 	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
 
 #define TESTCLEARFLAG(uname, lname, policy)				\
+static __always_inline							\
+bool folio_test_clear_##lname##_flag(struct folio *folio)		\
+{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
 static __always_inline int TestClearPage##uname(struct page *page)	\
 	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
 
@@ -328,29 +367,37 @@ static __always_inline int TestClearPage##uname(struct page *page)	\
 	TESTSETFLAG(uname, lname, policy)				\
 	TESTCLEARFLAG(uname, lname, policy)
 
-#define TESTPAGEFLAG_FALSE(uname)					\
+#define TESTPAGEFLAG_FALSE(uname, lname)				\
+static inline bool folio_##lname(const struct folio *folio) { return false; } \
 static inline int Page##uname(const struct page *page) { return 0; }
 
-#define SETPAGEFLAG_NOOP(uname)						\
+#define SETPAGEFLAG_NOOP(uname, lname)					\
+static inline void folio_set_##lname##_flag(struct folio *folio) { }	\
 static inline void SetPage##uname(struct page *page) {  }
 
-#define CLEARPAGEFLAG_NOOP(uname)					\
+#define CLEARPAGEFLAG_NOOP(uname, lname)				\
+static inline void folio_clear_##lname##_flag(struct folio *folio) { }	\
 static inline void ClearPage##uname(struct page *page) {  }
 
-#define __CLEARPAGEFLAG_NOOP(uname)					\
+#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
+static inline void __folio_clear_##lname##_flag(struct folio *folio) { } \
 static inline void __ClearPage##uname(struct page *page) {  }
 
-#define TESTSETFLAG_FALSE(uname)					\
+#define TESTSETFLAG_FALSE(uname, lname)					\
+static inline bool folio_test_set_##lname##_flag(struct folio *folio)	\
+{ return false; }							\
 static inline int TestSetPage##uname(struct page *page) { return 0; }
 
-#define TESTCLEARFLAG_FALSE(uname)					\
+#define TESTCLEARFLAG_FALSE(uname, lname)				\
+static inline bool folio_test_clear_##lname##_flag(struct folio *folio) \
+{ return false; }							\
 static inline int TestClearPage##uname(struct page *page) { return 0; }
 
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
-	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
+	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
 
-#define TESTSCFLAG_FALSE(uname)						\
-	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname)					\
+	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
 
 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
@@ -406,8 +453,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
 PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
 	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
-	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
+	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
 
 #ifdef CONFIG_HIGHMEM
 /*
@@ -416,22 +463,25 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
  */
 #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
 #else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
 #endif
 
 #ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool folio_swapcache(struct folio *folio)
 {
-#ifdef CONFIG_THP_SWAP
-	page = compound_head(page);
-#endif
-	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+	return folio_swapbacked(folio) &&
+			test_bit(PG_swapcache, folio_flags(folio, 0));
+}
 
+static __always_inline bool PageSwapCache(struct page *page)
+{
+	return folio_swapcache(page_folio(page));
 }
+
 SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
 CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
 #else
-PAGEFLAG_FALSE(SwapCache)
+PAGEFLAG_FALSE(SwapCache, swapcache)
 #endif
 
 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
@@ -443,14 +493,14 @@ PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
 	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
 	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
 #else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
-	TESTSCFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
+	TESTSCFLAG_FALSE(Mlocked, mlocked)
 #endif
 
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
 PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
 #else
-PAGEFLAG_FALSE(Uncached)
+PAGEFLAG_FALSE(Uncached, uncached)
 #endif
 
 #ifdef CONFIG_MEMORY_FAILURE
@@ -459,7 +509,7 @@ TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
 #define __PG_HWPOISON (1UL << PG_hwpoison)
 extern bool take_page_off_buddy(struct page *page);
 #else
-PAGEFLAG_FALSE(HWPoison)
+PAGEFLAG_FALSE(HWPoison, hwpoison)
 #define __PG_HWPOISON 0
 #endif
 
@@ -505,10 +555,14 @@ static __always_inline int PageMappingFlags(struct page *page)
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
 }
 
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool folio_anon(struct folio *folio)
+{
+	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline bool PageAnon(struct page *page)
 {
-	page = compound_head(page);
-	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+	return folio_anon(page_folio(page));
 }
 
 static __always_inline int __PageMovable(struct page *page)
@@ -524,30 +578,32 @@ static __always_inline int __PageMovable(struct page *page)
  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
  * anon_vma, but to that page's node of the stable tree.
  */
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_ksm(struct folio *folio)
 {
-	page = compound_head(page);
-	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
 				PAGE_MAPPING_KSM;
 }
+
+static __always_inline bool PageKsm(struct page *page)
+{
+	return folio_ksm(page_folio(page));
+}
 #else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(Ksm, ksm)
 #endif
 
 u64 stable_page_flags(struct page *page);
 
-static inline int PageUptodate(struct page *page)
+static inline bool folio_uptodate(struct folio *folio)
 {
-	int ret;
-	page = compound_head(page);
-	ret = test_bit(PG_uptodate, &(page)->flags);
+	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
 	/*
-	 * Must ensure that the data we read out of the page is loaded
-	 * _after_ we've loaded page->flags to check for PageUptodate.
-	 * We can skip the barrier if the page is not uptodate, because
+	 * Must ensure that the data we read out of the folio is loaded
+	 * _after_ we've loaded folio->flags to check the uptodate bit.
+	 * We can skip the barrier if the folio is not uptodate, because
 	 * we wouldn't be reading anything from it.
 	 *
-	 * See SetPageUptodate() for the other side of the story.
+	 * See folio_mark_uptodate() for the other side of the story.
 	 */
 	if (ret)
 		smp_rmb();
@@ -555,23 +611,36 @@ static inline int PageUptodate(struct page *page)
 	return ret;
 }
 
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline int PageUptodate(struct page *page)
+{
+	return folio_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
 	smp_wmb();
-	__set_bit(PG_uptodate, &page->flags);
+	__set_bit(PG_uptodate, folio_flags(folio, 0));
 }
 
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
 	/*
 	 * Memory barrier must be issued before setting the PG_uptodate bit,
-	 * so that all previous stores issued in order to bring the page
-	 * uptodate are actually visible before PageUptodate becomes true.
+	 * so that all previous stores issued in order to bring the folio
+	 * uptodate are actually visible before folio_uptodate becomes true.
 	 */
 	smp_wmb();
-	set_bit(PG_uptodate, &page->flags);
+	set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+	__folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+	folio_mark_uptodate((struct folio *)page);
 }
 
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
@@ -596,6 +665,17 @@ static inline void set_page_writeback_keepwrite(struct page *page)
 
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 
+/* Whether there are one or multiple pages in a folio */
+static inline bool folio_single(struct folio *folio)
+{
+	return !folio_head(folio);
+}
+
+static inline bool folio_multi(struct folio *folio)
+{
+	return folio_head(folio);
+}
+
 static __always_inline void set_compound_head(struct page *page, struct page *head)
 {
 	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
@@ -619,12 +699,15 @@ static inline void ClearPageCompound(struct page *page)
 #ifdef CONFIG_HUGETLB_PAGE
 int PageHuge(struct page *page);
 int PageHeadHuge(struct page *page);
+static inline bool folio_hugetlb(struct folio *folio)
+{
+	return PageHeadHuge(&folio->page);
+}
 #else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
+TESTPAGEFLAG_FALSE(Huge, hugetlb)
+TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
 #endif
 
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * PageHuge() only returns true for hugetlbfs pages, but not for
@@ -640,6 +723,11 @@ static inline int PageTransHuge(struct page *page)
 	return PageHead(page);
 }
 
+static inline bool folio_transhuge(struct folio *folio)
+{
+	return folio_head(folio);
+}
+
 /*
  * PageTransCompound returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
@@ -713,12 +801,12 @@ static inline int PageTransTail(struct page *page)
 PAGEFLAG(DoubleMap, double_map, PF_SECOND)
 	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
 #else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
-	TESTSCFLAG_FALSE(DoubleMap)
+TESTPAGEFLAG_FALSE(TransHuge, transhuge)
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
+TESTPAGEFLAG_FALSE(TransTail, transtail)
+PAGEFLAG_FALSE(DoubleMap, double_map)
+	TESTSCFLAG_FALSE(DoubleMap, double_map)
 #endif
 
 /*
@@ -871,6 +959,11 @@ static inline int page_has_private(struct page *page)
 	return !!(page->flags & PAGE_FLAGS_PRIVATE);
 }
 
+static inline bool folio_has_private(struct folio *folio)
+{
+	return page_has_private(&folio->page);
+}
+
 #undef PF_ANY
 #undef PF_HEAD
 #undef PF_ONLY_HEAD
-- 
2.30.2

