* [RFC PATCH 01/11] mm: Introduce struct folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
We have trouble keeping track of whether we've already called
compound_head() to ensure we're not operating on a tail page. Further,
it's never clear whether we intend a struct page to refer to PAGE_SIZE
bytes or page_size(compound_head(page)).
Introduce a new type 'struct folio' that always refers to an entire
(possibly compound) page, and points to the head page (or base page).
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 5 +++++
include/linux/mm_types.h | 17 +++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d1f64744ace2..7db9a10f084b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -916,6 +916,11 @@ static inline unsigned int compound_order(struct page *page)
return page[1].compound_order;
}
+static inline unsigned int folio_order(struct folio *folio)
+{
+ return compound_order(&folio->page);
+}
+
static inline bool hpage_pincount_available(struct page *page)
{
/*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 65df8abd90bd..d7e487d9998f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -223,6 +223,23 @@ struct page {
#endif
} _struct_page_alignment;
+/*
+ * A struct folio is either a base (order-0) page or the head page of
+ * a compound page.
+ */
+struct folio {
+ struct page page;
+};
+
+static inline struct folio *page_folio(struct page *page)
+{
+ unsigned long head = READ_ONCE(page->compound_head);
+
+ if (unlikely(head & 1))
+ return (struct folio *)(head - 1);
+ return (struct folio *)page;
+}
+
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
--
2.29.2
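page_folio() leans on the existing compound_head encoding: a tail page
stores the head page's address with bit 0 set in page->compound_head,
while head and base pages leave bit 0 clear. To see what the new
invariant buys, here is a hypothetical helper (not part of this series)
built only on what this patch adds; because a folio can never point at
a tail page, computing its size needs no compound_head() call:

static inline size_t folio_size(struct folio *folio)
{
	/* hypothetical helper: a folio spans 2^order base pages */
	return PAGE_SIZE << folio_order(folio);
}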
* [RFC PATCH 02/11] mm: Add put_folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
If we know we have a folio, we can call put_folio() instead of put_page()
and save the overhead of calling compound_head(). It also skips the
devmap checks.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7db9a10f084b..80d38cc9561c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1189,9 +1189,15 @@ static inline __must_check bool try_get_page(struct page *page)
return true;
}
+static inline void put_folio(struct folio *folio)
+{
+ if (put_page_testzero(&folio->page))
+ __put_page(&folio->page);
+}
+
static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
/*
* For devmap managed pages we need to catch refcount transition from
@@ -1199,13 +1205,12 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (page_is_devmap_managed(page)) {
- put_devmap_managed_page(page);
+ if (page_is_devmap_managed(&folio->page)) {
+ put_devmap_managed_page(&folio->page);
return;
}
- if (put_page_testzero(page))
- __put_page(page);
+ put_folio(folio);
}
/*
--
2.29.2
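A sketch of the intended calling convention (hypothetical caller): code
that resolves its page to a folio once can drop the reference directly,
leaving page_folio() and the devmap check to legacy page-based callers:

static void example_put(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* ... operate on the folio ... */
	put_folio(folio);	/* no compound_head(), no devmap check */
}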
* [RFC PATCH 03/11] mm: Add get_folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
If we know we have a folio, we can call get_folio() instead of get_page()
and save the overhead of calling compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80d38cc9561c..32ac5c14097d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1167,15 +1167,17 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
#define page_ref_zero_or_close_to_overflow(page) \
((unsigned int) page_ref_count(page) + 127u <= 127u)
+static inline void get_folio(struct folio *folio)
+{
+ /* Getting a page requires an already elevated page->_refcount. */
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(&folio->page),
+ &folio->page);
+ page_ref_inc(&folio->page);
+}
+
static inline void get_page(struct page *page)
{
- page = compound_head(page);
- /*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
- */
- VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
- page_ref_inc(page);
+ get_folio(page_folio(page));
}
bool __must_check try_grab_page(struct page *page, unsigned int flags);
--
2.29.2
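Together with the previous patch, this gives a lookup-free reference
hold; a hypothetical caller that already has a folio with an elevated
refcount:

static void example_hold(struct folio *folio)
{
	get_folio(folio);	/* caller must already hold a reference */
	/* ... use the folio across a sleep or unlock ... */
	put_folio(folio);
}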
* [RFC PATCH 04/11] mm: Create FolioFlags
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
These new functions are the folio analogues of the PageFlags functions.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/page-flags.h | 36 +++++++++++++++++++++++++++++++++---
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index ec5d0290e0ee..2c51cd4b3630 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -212,6 +212,12 @@ static inline void page_init_poison(struct page *page, size_t size)
}
#endif
+static unsigned long *folio_flags(struct folio *folio)
+{
+ VM_BUG_ON_PGFLAGS(PagePoisoned(&folio->page), &folio->page);
+ return &folio->page.flags;
+}
+
/*
* Page flags policies wrt compound pages
*
@@ -260,30 +266,44 @@ static inline void page_init_poison(struct page *page, size_t size)
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
+static __always_inline int Folio##uname(struct folio *folio) \
+ { return test_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline int Page##uname(struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
+static __always_inline void SetFolio##uname(struct folio *folio) \
+ { set_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline void ClearFolio##uname(struct folio *folio) \
+ { clear_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
+static __always_inline void __SetFolio##uname(struct folio *folio) \
+ { __set_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline void __ClearFolio##uname(struct folio *folio) \
+ { __clear_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
+static __always_inline int TestSetFolio##uname(struct folio *folio) \
+ { return test_and_set_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
+static __always_inline int TestClearFolio##uname(struct folio *folio) \
+ { return test_and_clear_bit(PG_##lname, folio_flags(folio)); } \
static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
@@ -302,21 +322,27 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTCLEARFLAG(uname, lname, policy)
#define TESTPAGEFLAG_FALSE(uname) \
+static inline int Folio##uname(const struct folio *folio) { return 0; } \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname) \
+static inline void SetFolio##uname(struct folio *folio) { } \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname) \
+static inline void ClearFolio##uname(struct folio *folio) { } \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname) \
+static inline void __ClearFolio##uname(struct folio *folio) { } \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname) \
+static inline int TestSetFolio##uname(struct folio *folio) { return 0; } \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname) \
+static inline int TestClearFolio##uname(struct folio *folio) { return 0; } \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
@@ -509,11 +535,10 @@ TESTPAGEFLAG_FALSE(Ksm)
u64 stable_page_flags(struct page *page);
-static inline int PageUptodate(struct page *page)
+static inline int FolioUptodate(struct folio *folio)
{
int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ ret = test_bit(PG_uptodate, folio_flags(folio));
/*
* Must ensure that the data we read out of the page is loaded
* _after_ we've loaded page->flags to check for PageUptodate.
@@ -528,6 +553,11 @@ static inline int PageUptodate(struct page *page)
return ret;
}
+static inline int PageUptodate(struct page *page)
+{
+ return FolioUptodate(page_folio(page));
+}
+
static __always_inline void __SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
--
2.29.2
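For illustration, expanding the existing declaration
TESTPAGEFLAG(Dirty, dirty, PF_HEAD) by hand now produces, in effect:

static __always_inline int FolioDirty(struct folio *folio)
	{ return test_bit(PG_dirty, folio_flags(folio)); }
static __always_inline int PageDirty(struct page *page)
	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }

The page variant still pays for a compound_head() call via the PF_HEAD
policy; the folio variant reads folio->page.flags directly.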
* [RFC PATCH 05/11] mm: Add unlock_folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
Convert unlock_page() to call unlock_folio(). By using a folio, we avoid
a repeated call to compound_head(). This shortens the function from
120 bytes to 76 bytes.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/pagemap.h | 16 +++++++++++++++-
mm/filemap.c | 27 ++++++++++-----------------
2 files changed, 25 insertions(+), 18 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 46d4b1704770..64ae1bb62765 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -588,7 +588,21 @@ extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
-extern void unlock_page(struct page *page);
+extern void unlock_folio(struct folio *folio);
+
+/**
+ * unlock_page - Unlock a locked page.
+ * @page: The page.
+ *
+ * Unlocks the page and wakes up any thread sleeping on the page lock.
+ *
+ * Context: May be called from interrupt or process context. May not be
+ * called from NMI context.
+ */
+static inline void unlock_page(struct page *page)
+{
+ return unlock_folio(page_folio(page));
+}
/*
* Return true if the page was successfully locked
diff --git a/mm/filemap.c b/mm/filemap.c
index 78090ee08ac2..de8372307b33 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1443,29 +1443,22 @@ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem
#endif
/**
- * unlock_page - unlock a locked page
- * @page: the page
+ * unlock_folio - Unlock a locked folio.
+ * @folio: The folio.
*
- * Unlocks the page and wakes up sleepers in wait_on_page_locked().
- * Also wakes sleepers in wait_on_page_writeback() because the wakeup
- * mechanism between PageLocked pages and PageWriteback pages is shared.
- * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
+ * Unlocks the folio and wakes up any thread sleeping on the page lock.
*
- * Note that this depends on PG_waiters being the sign bit in the byte
- * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
- * clear the PG_locked bit and test PG_waiters at the same time fairly
- * portably (architectures that do LL/SC can test any bit, while x86 can
- * test the sign bit).
+ * Context: May be called from interrupt or process context. May not be
+ * called from NMI context.
*/
-void unlock_page(struct page *page)
+void unlock_folio(struct folio *folio)
{
BUILD_BUG_ON(PG_waiters != 7);
- page = compound_head(page);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
- wake_up_page_bit(page, PG_locked);
+ VM_BUG_ON_PAGE(!FolioLocked(folio), &folio->page);
+ if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio)))
+ wake_up_page_bit(&folio->page, PG_locked);
}
-EXPORT_SYMBOL(unlock_page);
+EXPORT_SYMBOL(unlock_folio);
/**
* end_page_writeback - end writeback against a page
--
2.29.2
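The BUILD_BUG_ON(PG_waiters != 7) retained above still guards the trick
the old comment described: PG_locked is bit 0 and PG_waiters is bit 7 of
the same byte, so clear_bit_unlock_is_negative_byte() can release the
lock and report waiters in a single operation by testing the byte's sign
bit. A hypothetical folio-native caller skips even the page_folio() call
in the new unlock_page() wrapper:

static void example_unlock(struct folio *folio)
{
	/* ... finish with the folio ... */
	unlock_folio(folio);	/* wakes any PG_locked waiters */
}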
* [RFC PATCH 06/11] mm: Add lock_folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __lock_folio(). This saves one call to
compound_head() per contended call to lock_page().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/pagemap.h | 21 +++++++++++++++------
mm/filemap.c | 29 +++++++++++++++--------------
2 files changed, 30 insertions(+), 20 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 64ae1bb62765..1d4a1828a434 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -583,7 +583,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
return true;
}
-extern void __lock_page(struct page *page);
+extern void __lock_folio(struct folio *folio);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -604,13 +604,24 @@ static inline void unlock_page(struct page *page)
return unlock_folio(page_folio(page));
}
+static inline bool trylock_folio(struct folio *folio)
+{
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio)));
+}
+
/*
* Return true if the page was successfully locked
*/
static inline int trylock_page(struct page *page)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return trylock_folio(page_folio(page));
+}
+
+static inline void lock_folio(struct folio *folio)
+{
+ might_sleep();
+ if (!trylock_folio(folio))
+ __lock_folio(folio);
}
/*
@@ -618,9 +629,7 @@ static inline int trylock_page(struct page *page)
*/
static inline void lock_page(struct page *page)
{
- might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+ lock_folio(page_folio(page));
}
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index de8372307b33..8e87906f5dd6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1160,7 +1160,7 @@ static void wake_up_page(struct page *page, int bit)
*/
enum behavior {
EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
- * __lock_page() waiting on then setting PG_locked.
+ * __lock_folio() waiting on then setting PG_locked.
*/
SHARED, /* Hold ref to page and check the bit when woken, like
* wait_on_page_writeback() waiting on PG_writeback.
@@ -1523,17 +1523,16 @@ void page_endio(struct page *page, bool is_write, int err)
EXPORT_SYMBOL_GPL(page_endio);
/**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __lock_folio - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
*/
-void __lock_page(struct page *__page)
+void __lock_folio(struct folio *folio)
{
- struct page *page = compound_head(__page);
- wait_queue_head_t *q = page_waitqueue(page);
- wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+ wait_queue_head_t *q = page_waitqueue(&folio->page);
+ wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
EXCLUSIVE);
}
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__lock_folio);
int __lock_page_killable(struct page *__page)
{
@@ -1587,10 +1586,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
return 0;
}
} else {
- __lock_page(page);
+ __lock_folio(page_folio(page));
}
- return 1;
+ return 1;
}
/**
@@ -2764,7 +2763,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
struct file **fpin)
{
- if (trylock_page(page))
+ struct folio *folio = page_folio(page);
+
+ if (trylock_folio(folio))
return 1;
/*
@@ -2777,7 +2778,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
if (vmf->flags & FAULT_FLAG_KILLABLE) {
- if (__lock_page_killable(page)) {
+ if (__lock_page_killable(&folio->page)) {
/*
* We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals
@@ -2789,11 +2790,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
return 0;
}
} else
- __lock_page(page);
+ __lock_folio(folio);
+
return 1;
}
-
/*
* Synchronous readahead happens when we don't even find a page in the page
* cache at all. We don't want to perform IO under the mmap sem, so if we have
--
2.29.2
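A minimal sketch of the folio-native locking pattern this enables
(hypothetical caller):

static void example_locked_op(struct folio *folio)
{
	lock_folio(folio);	/* sleeps in __lock_folio() on contention */
	/* ... the whole (possibly compound) page is now locked ... */
	unlock_folio(folio);
}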
* [RFC PATCH 07/11] mm: Add lock_folio_killable
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
This is like lock_page_killable() but for use by callers who
know they have a folio. Convert __lock_page_killable() to be
__lock_folio_killable(). This saves one call to compound_head() per
contended call to lock_page_killable().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/pagemap.h | 15 ++++++++++-----
mm/filemap.c | 17 +++++++++--------
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1d4a1828a434..060faeb8d701 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -584,7 +584,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
}
extern void __lock_folio(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+extern int __lock_folio_killable(struct folio *folio);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
@@ -632,6 +632,14 @@ static inline void lock_page(struct page *page)
lock_folio(page_folio(page));
}
+static inline int lock_folio_killable(struct folio *folio)
+{
+ might_sleep();
+ if (!trylock_folio(folio))
+ return __lock_folio_killable(folio);
+ return 0;
+}
+
/*
* lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was
@@ -639,10 +647,7 @@ static inline void lock_page(struct page *page)
*/
static inline int lock_page_killable(struct page *page)
{
- might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+ return lock_folio_killable(page_folio(page));
}
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e87906f5dd6..50535b21b452 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1534,14 +1534,13 @@ void __lock_folio(struct folio *folio)
}
EXPORT_SYMBOL(__lock_folio);
-int __lock_page_killable(struct page *__page)
+int __lock_folio_killable(struct folio *folio)
{
- struct page *page = compound_head(__page);
- wait_queue_head_t *q = page_waitqueue(page);
- return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+ wait_queue_head_t *q = page_waitqueue(&folio->page);
+ return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
EXCLUSIVE);
}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__lock_folio_killable);
int __lock_page_async(struct page *page, struct wait_page_queue *wait)
{
@@ -1562,6 +1561,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
+ struct folio *folio = page_folio(page);
+
if (fault_flag_allow_retry_first(flags)) {
/*
* CAUTION! In this case, mmap_lock is not released
@@ -1580,13 +1581,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
if (flags & FAULT_FLAG_KILLABLE) {
int ret;
- ret = __lock_page_killable(page);
+ ret = __lock_folio_killable(folio);
if (ret) {
mmap_read_unlock(mm);
return 0;
}
} else {
- __lock_folio(page_folio(page));
+ __lock_folio(folio);
}
return 1;
@@ -2778,7 +2779,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
if (vmf->flags & FAULT_FLAG_KILLABLE) {
- if (__lock_page_killable(&folio->page)) {
+ if (__lock_folio_killable(folio)) {
/*
* We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals
--
2.29.2
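The killable variant keeps the usual 0/-EINTR convention, so a
hypothetical folio-native caller looks like:

static int example_locked_op_killable(struct folio *folio)
{
	int err = lock_folio_killable(folio);

	if (err)
		return err;	/* -EINTR: a fatal signal arrived */
	/* ... operate on the locked folio ... */
	unlock_folio(folio);
	return 0;
}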
* [RFC PATCH 08/11] mm/filemap: Convert end_page_writeback to use a folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
With my config, end_page_writeback() shrinks from 480 bytes to 240 bytes
due to the elimination of repeated calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/filemap.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 50535b21b452..f1b65f777539 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1148,11 +1148,11 @@ static void wake_up_page_bit(struct page *page, int bit_nr)
spin_unlock_irqrestore(&q->lock, flags);
}
-static void wake_up_page(struct page *page, int bit)
+static void wake_up_folio(struct folio *folio, int bit)
{
- if (!PageWaiters(page))
+ if (!FolioWaiters(folio))
return;
- wake_up_page_bit(page, bit);
+ wake_up_page_bit(&folio->page, bit);
}
/*
@@ -1466,6 +1466,8 @@ EXPORT_SYMBOL(unlock_folio);
*/
void end_page_writeback(struct page *page)
{
+ struct folio *folio = page_folio(page);
+
/*
* TestClearPageReclaim could be used here but it is an atomic
* operation and overkill in this particular case. Failing to
@@ -1473,9 +1475,9 @@ void end_page_writeback(struct page *page)
* justify taking an atomic operation penalty at the end of
* ever page writeback.
*/
- if (PageReclaim(page)) {
- ClearPageReclaim(page);
- rotate_reclaimable_page(page);
+ if (FolioReclaim(folio)) {
+ ClearFolioReclaim(folio);
+ rotate_reclaimable_page(&folio->page);
}
/*
@@ -1484,13 +1486,13 @@ void end_page_writeback(struct page *page)
* But here we must make sure that the page is not freed and
* reused before the wake_up_page().
*/
- get_page(page);
- if (!test_clear_page_writeback(page))
+ get_folio(folio);
+ if (!test_clear_page_writeback(&folio->page))
BUG();
smp_mb__after_atomic();
- wake_up_page(page, PG_writeback);
- put_page(page);
+ wake_up_folio(folio, PG_writeback);
+ put_folio(folio);
}
EXPORT_SYMBOL(end_page_writeback);
--
2.29.2
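The saving comes from hoisting the head-page lookup: previously each
flag test and refcount operation in this function re-derived the head
page, whereas now page_folio() runs once on entry. The same pattern
applies to any page-based entry point (sketch, hypothetical function):

static void example_entry(struct page *page)
{
	struct folio *folio = page_folio(page);	/* one lookup */

	if (FolioReclaim(folio))	/* lookup-free from here on */
		ClearFolioReclaim(folio);
}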
* [RFC PATCH 09/11] mm/filemap: Convert mapping_get_entry and pagecache_get_page to folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
Convert mapping_get_entry() to return a folio and convert
pagecache_get_page() to use the folio where possible. The seemingly
dangerous cast of a page pointer to a folio pointer is safe because
__page_cache_alloc() allocates an order-0 page, which is a folio by
definition.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/filemap.c | 45 ++++++++++++++++++++++++---------------------
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index f1b65f777539..56ff6aa24265 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1673,33 +1673,33 @@ EXPORT_SYMBOL(page_cache_prev_miss);
* @index: The page cache index.
*
* Looks up the page cache slot at @mapping & @offset. If there is a
- * page cache page, the head page is returned with an increased refcount.
+ * page cache page, the folio is returned with an increased refcount.
*
* If the slot holds a shadow entry of a previously evicted page, or a
* swap entry from shmem/tmpfs, it is returned.
*
- * Return: The head page or shadow entry, %NULL if nothing is found.
+ * Return: The folio or shadow entry, %NULL if nothing is found.
*/
-static struct page *mapping_get_entry(struct address_space *mapping,
+static struct folio *mapping_get_entry(struct address_space *mapping,
pgoff_t index)
{
XA_STATE(xas, &mapping->i_pages, index);
- struct page *page;
+ struct folio *folio;
rcu_read_lock();
repeat:
xas_reset(&xas);
- page = xas_load(&xas);
- if (xas_retry(&xas, page))
+ folio = xas_load(&xas);
+ if (xas_retry(&xas, folio))
goto repeat;
/*
* A shadow entry of a recently evicted page, or a swap entry from
* shmem/tmpfs. Return it without attempting to raise page count.
*/
- if (!page || xa_is_value(page))
+ if (!folio || xa_is_value(folio))
goto out;
- if (!page_cache_get_speculative(page))
+ if (!page_cache_get_speculative(&folio->page))
goto repeat;
/*
@@ -1707,14 +1707,14 @@ static struct page *mapping_get_entry(struct address_space *mapping,
* This is part of the lockless pagecache protocol. See
* include/linux/pagemap.h for details.
*/
- if (unlikely(page != xas_reload(&xas))) {
- put_page(page);
+ if (unlikely(folio != xas_reload(&xas))) {
+ put_folio(folio);
goto repeat;
}
out:
rcu_read_unlock();
- return page;
+ return folio;
}
/**
@@ -1754,11 +1754,13 @@ static struct page *mapping_get_entry(struct address_space *mapping,
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
{
+ struct folio *folio;
struct page *page;
repeat:
- page = mapping_get_entry(mapping, index);
- if (xa_is_value(page)) {
+ folio = mapping_get_entry(mapping, index);
+ page = &folio->page;
+ if (xa_is_value(folio)) {
if (fgp_flags & FGP_ENTRY)
return page;
page = NULL;
@@ -1768,18 +1770,18 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
if (fgp_flags & FGP_LOCK) {
if (fgp_flags & FGP_NOWAIT) {
- if (!trylock_page(page)) {
- put_page(page);
+ if (!trylock_folio(folio)) {
+ put_folio(folio);
return NULL;
}
} else {
- lock_page(page);
+ lock_folio(folio);
}
/* Has the page been truncated? */
if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- put_page(page);
+ unlock_folio(folio);
+ put_folio(folio);
goto repeat;
}
VM_BUG_ON_PAGE(!thp_contains(page, index), page);
@@ -1806,17 +1808,18 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
+ folio = (struct folio *)page;
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
/* Init accessed so avoid atomic mark_page_accessed later */
if (fgp_flags & FGP_ACCESSED)
- __SetPageReferenced(page);
+ __SetFolioReferenced(folio);
err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
if (unlikely(err)) {
- put_page(page);
+ put_folio(folio);
page = NULL;
if (err == -EEXIST)
goto repeat;
@@ -1827,7 +1830,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
* an unlocked page.
*/
if (page && (fgp_flags & FGP_FOR_MMAP))
- unlock_page(page);
+ unlock_folio(folio);
}
return page;
--
2.29.2
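The cast's precondition could be made self-documenting with a
hypothetical helper (not part of this series); it encodes exactly the
"never a tail page" invariant that a fresh order-0 allocation satisfies:

static inline struct folio *folio_from_new_page(struct page *page)
{
	/* hypothetical: only valid for pages that cannot be tail pages,
	 * e.g. a fresh order-0 allocation from __page_cache_alloc() */
	VM_BUG_ON_PAGE(PageTail(page), page);
	return (struct folio *)page;
}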
* [RFC PATCH 10/11] mm/filemap: Add folio_add_to_page_cache
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
Pages being added to the page cache should already be folios, so turn
add_to_page_cache_lru() into a wrapper. This saves hundreds of bytes
of text.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/pagemap.h | 13 +++++++--
mm/filemap.c | 62 ++++++++++++++++++++---------------------
2 files changed, 41 insertions(+), 34 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 060faeb8d701..3bc56b3aa384 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -778,9 +778,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp);
+int folio_add_to_page_cache(struct folio *folio, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
@@ -805,6 +805,13 @@ static inline int add_to_page_cache(struct page *page,
return error;
}
+static inline int add_to_page_cache_lru(struct page *page,
+ struct address_space *mapping, pgoff_t index, gfp_t gfp)
+{
+ return folio_add_to_page_cache((struct folio *)page, mapping,
+ index, gfp);
+}
+
/**
* struct readahead_control - Describes a readahead request.
*
diff --git a/mm/filemap.c b/mm/filemap.c
index 56ff6aa24265..297144524f58 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -828,25 +828,25 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
-static noinline int __add_to_page_cache_locked(struct page *page,
+static noinline int __add_to_page_cache_locked(struct folio *folio,
struct address_space *mapping,
- pgoff_t offset, gfp_t gfp,
+ pgoff_t index, gfp_t gfp,
void **shadowp)
{
- XA_STATE(xas, &mapping->i_pages, offset);
- int huge = PageHuge(page);
+ XA_STATE(xas, &mapping->i_pages, index);
+ int huge = PageHuge(&folio->page);
int error;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+ VM_BUG_ON_PAGE(!FolioLocked(folio), &folio->page);
+ VM_BUG_ON_PAGE(FolioSwapBacked(folio), &folio->page);
mapping_set_update(&xas, mapping);
- get_page(page);
- page->mapping = mapping;
- page->index = offset;
+ get_folio(folio);
+ folio->page.mapping = mapping;
+ folio->page.index = index;
- if (!huge && !page_is_secretmem(page)) {
- error = mem_cgroup_charge(page, current->mm, gfp);
+ if (!huge && !page_is_secretmem(&folio->page)) {
+ error = mem_cgroup_charge(&folio->page, current->mm, gfp);
if (error)
goto error;
}
@@ -857,7 +857,7 @@ static noinline int __add_to_page_cache_locked(struct page *page,
unsigned int order = xa_get_order(xas.xa, xas.xa_index);
void *entry, *old = NULL;
- if (order > thp_order(page))
+ if (order > folio_order(folio))
xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
order, gfp);
xas_lock_irq(&xas);
@@ -874,13 +874,13 @@ static noinline int __add_to_page_cache_locked(struct page *page,
*shadowp = old;
/* entry may have been split before we acquired lock */
order = xa_get_order(xas.xa, xas.xa_index);
- if (order > thp_order(page)) {
+ if (order > folio_order(folio)) {
xas_split(&xas, old, order);
xas_reset(&xas);
}
}
- xas_store(&xas, page);
+ xas_store(&xas, folio);
if (xas_error(&xas))
goto unlock;
@@ -890,7 +890,7 @@ static noinline int __add_to_page_cache_locked(struct page *page,
/* hugetlb pages do not participate in page cache accounting */
if (!huge)
- __inc_lruvec_page_state(page, NR_FILE_PAGES);
+ __inc_lruvec_page_state(&folio->page, NR_FILE_PAGES);
unlock:
xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp));
@@ -900,12 +900,12 @@ static noinline int __add_to_page_cache_locked(struct page *page,
goto error;
}
- trace_mm_filemap_add_to_page_cache(page);
+ trace_mm_filemap_add_to_page_cache(&folio->page);
return 0;
error:
- page->mapping = NULL;
+ folio->page.mapping = NULL;
/* Leave page->index set: truncation relies upon it */
- put_page(page);
+ put_folio(folio);
return error;
}
ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
@@ -925,22 +925,22 @@ ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
- return __add_to_page_cache_locked(page, mapping, offset,
+ return __add_to_page_cache_locked(page_folio(page), mapping, offset,
gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
+int folio_add_to_page_cache(struct folio *folio, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask)
{
void *shadow = NULL;
int ret;
- __SetPageLocked(page);
- ret = __add_to_page_cache_locked(page, mapping, offset,
+ __SetFolioLocked(folio);
+ ret = __add_to_page_cache_locked(folio, mapping, index,
gfp_mask, &shadow);
if (unlikely(ret))
- __ClearPageLocked(page);
+ __ClearFolioLocked(folio);
else {
/*
* The page might have been evicted from cache only
@@ -950,14 +950,14 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
* data from the working set, only to cache data that will
* get overwritten with something else, is a waste of memory.
*/
- WARN_ON_ONCE(PageActive(page));
+ WARN_ON_ONCE(FolioActive(folio));
if (!(gfp_mask & __GFP_WRITE) && shadow)
- workingset_refault(page, shadow);
- lru_cache_add(page);
+ workingset_refault(&folio->page, shadow);
+ lru_cache_add(&folio->page);
}
return ret;
}
-EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
+EXPORT_SYMBOL_GPL(folio_add_to_page_cache);
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
@@ -1817,7 +1817,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
if (fgp_flags & FGP_ACCESSED)
__SetFolioReferenced(folio);
- err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+ err = folio_add_to_page_cache(folio, mapping, index, gfp_mask);
if (unlikely(err)) {
put_folio(folio);
page = NULL;
@@ -1826,8 +1826,8 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
}
/*
- * add_to_page_cache_lru locks the page, and for mmap we expect
- * an unlocked page.
+ * folio_add_to_page_cache locks the page, and for mmap we
+ * expect an unlocked page.
*/
if (page && (fgp_flags & FGP_FOR_MMAP))
unlock_folio(folio);
--
2.29.2
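The cast inside the new wrapper carries the assumption the changelog
states: callers of add_to_page_cache_lru() never pass a tail page. A
hypothetical variant that makes the precondition explicit:

static inline int add_to_page_cache_lru(struct page *page,
		struct address_space *mapping, pgoff_t index, gfp_t gfp)
{
	VM_BUG_ON_PAGE(PageTail(page), page);	/* must already be a folio */
	return folio_add_to_page_cache((struct folio *)page, mapping,
			index, gfp);
}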
* Re: [RFC PATCH 10/11] mm/filemap: Add folio_add_to_page_cache
From: Nikolay Borisov @ 2020-12-11 8:32 UTC
To: Matthew Wilcox (Oracle), linux-fsdevel, linux-mm; +Cc: linux-kernel
On 8.12.20 21:46, Matthew Wilcox (Oracle) wrote:
> <snip>
> -static noinline int __add_to_page_cache_locked(struct page *page,
> +static noinline int __add_to_page_cache_locked(struct folio *folio,
> struct address_space *mapping,
> - pgoff_t offset, gfp_t gfp,
> + pgoff_t index, gfp_t gfp,
> void **shadowp)
> {
> - XA_STATE(xas, &mapping->i_pages, offset);
> - int huge = PageHuge(page);
> + XA_STATE(xas, &mapping->i_pages, index);
> + int huge = PageHuge(&folio->page);
PageHuge() also calls compound_head(). Since you know this is either a
head page or a base page, you could use PageHeadHuge(), which simply
checks whether it's a head page and then goes directly to the hugepage
check via the dtor.
<snip>
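In sketch form, the suggested change would be:

-	int huge = PageHuge(&folio->page);
+	int huge = PageHeadHuge(&folio->page);

PageHeadHuge() avoids the compound_head() that PageHuge() performs,
which is redundant here because &folio->page is never a tail page.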
* [RFC PATCH 11/11] mm/swap: Convert rotate_reclaimable_page to folio
From: Matthew Wilcox (Oracle) @ 2020-12-08 19:46 UTC
To: linux-fsdevel, linux-mm; +Cc: Matthew Wilcox (Oracle), linux-kernel
Move the declaration into mm/internal.h and rename the function to
rotate_reclaimable_folio(). This eliminates all five of the calls to
compound_head() in this function.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/swap.h | 1 -
mm/filemap.c | 2 +-
mm/internal.h | 1 +
mm/page_io.c | 4 ++--
mm/swap.c | 12 ++++++------
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 5bba15ac5a2e..5aaca35ce887 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -343,7 +343,6 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
diff --git a/mm/filemap.c b/mm/filemap.c
index 297144524f58..93e40e9ac357 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1477,7 +1477,7 @@ void end_page_writeback(struct page *page)
*/
if (FolioReclaim(folio)) {
ClearFolioReclaim(folio);
- rotate_reclaimable_page(&folio->page);
+ rotate_reclaimable_folio(folio);
}
/*
diff --git a/mm/internal.h b/mm/internal.h
index 8e9c660f33ca..f089535b5d86 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,7 @@
void page_writeback_init(void);
vm_fault_t do_swap_page(struct vm_fault *vmf);
+void rotate_reclaimable_folio(struct folio *folio);
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
diff --git a/mm/page_io.c b/mm/page_io.c
index 9bca17ecc4df..1fc0a579da58 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -57,7 +57,7 @@ void end_swap_bio_write(struct bio *bio)
* Also print a dire warning that things will go BAD (tm)
* very quickly.
*
- * Also clear PG_reclaim to avoid rotate_reclaimable_page()
+ * Also clear PG_reclaim to avoid rotate_reclaimable_folio()
*/
set_page_dirty(page);
pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
@@ -341,7 +341,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
* temporary failure if the system has limited
* memory for allocating transmit buffers.
* Mark the page dirty and avoid
- * rotate_reclaimable_page but rate-limit the
+ * rotate_reclaimable_folio but rate-limit the
* messages but do not flag PageError like
* the normal direct-to-bio case as it could
* be temporary.
diff --git a/mm/swap.c b/mm/swap.c
index 5022dfe388ad..9aadde8aea9b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,19 +241,19 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
* reclaim. If it still appears to be reclaimable, move it to the tail of the
* inactive list.
*
- * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
+ * rotate_reclaimable_folio() must disable IRQs, to prevent nasty races.
*/
-void rotate_reclaimable_page(struct page *page)
+void rotate_reclaimable_folio(struct folio *folio)
{
- if (!PageLocked(page) && !PageDirty(page) &&
- !PageUnevictable(page) && PageLRU(page)) {
+ if (!FolioLocked(folio) && !FolioDirty(folio) &&
+ !FolioUnevictable(folio) && FolioLRU(folio)) {
struct pagevec *pvec;
unsigned long flags;
- get_page(page);
+ get_folio(folio);
local_lock_irqsave(&lru_rotate.lock, flags);
pvec = this_cpu_ptr(&lru_rotate.pvec);
- if (!pagevec_add(pvec, page) || PageCompound(page))
+ if (!pagevec_add(pvec, &folio->page) || FolioHead(folio))
pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
--
2.29.2
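One substitution deserves a note: PageCompound() is true for both head
and tail pages, but a folio can never be a tail page, so FolioHead() is
an exact replacement here. The five eliminated compound_head() calls
were hidden in the four page-flag tests plus get_page(); the equivalence
relied on, as a sketch:

static inline bool folio_is_compound(struct folio *folio)
{
	/* hypothetical: the tail case cannot occur, so compound == head */
	return FolioHead(folio);
}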