From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH v4 20/25] mm/filemap: Convert wait_on_page_bit to wait_on_folio_bit
Date: Fri,  5 Mar 2021 04:18:56 +0000
Message-ID: <20210305041901.2396498-21-willy@infradead.org>
In-Reply-To: <20210305041901.2396498-1-willy@infradead.org>

We must always wait on the folio, otherwise we won't be woken up:
the waitqueue is hashed on the folio and the wake function matches
on it, so a waiter queued against a tail page would never receive
its wakeup.

This commit shrinks the kernel by 695 bytes, mostly due to moving
the page waitqueue lookup into wait_on_folio_bit_common().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
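The size win comes from the five wrappers each previously
open-coding the waitqueue lookup, a pattern along the lines of
(a sketch of the shape, not literal code from every call site):

	/* old shape: every wrapper looked up the hashed waitqueue */
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, state, behavior);

	/* new shape: the lookup happens once, inside the helper */
	wait_on_folio_bit_common(folio, bit_nr, state, behavior);

Remaining callers of the old interface convert along these lines
('page' here stands for a hypothetical caller's struct page, not
code in this series):

	/* before: the caller must remember to pass the head page,
	 * since that is what the wakeup side is keyed on */
	wait_on_page_bit(compound_head(page), PG_writeback);

	/* after: page_folio() gives us the right object by type */
	wait_on_folio_bit(page_folio(page), PG_writeback);
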
 fs/afs/write.c          | 21 ++++++++--------
 include/linux/pagemap.h | 10 ++++----
 mm/filemap.c            | 56 ++++++++++++++++++-----------------------
 mm/page-writeback.c     |  2 +-
 4 files changed, 42 insertions(+), 47 deletions(-)

diff --git a/fs/afs/write.c b/fs/afs/write.c
index c9195fc67fd8..0c313117200a 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -834,13 +834,14 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  */
 vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 {
+	struct folio *folio = page_folio(vmf->page);
 	struct file *file = vmf->vma->vm_file;
 	struct inode *inode = file_inode(file);
 	struct afs_vnode *vnode = AFS_FS_I(inode);
 	unsigned long priv;
 
 	_enter("{{%llx:%llu}},{%lx}",
-	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
+	       vnode->fid.vid, vnode->fid.vnode, folio->page.index);
 
 	sb_start_pagefault(inode->i_sb);
 
@@ -851,27 +852,27 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 	fscache_wait_on_page_write(vnode->cache, vmf->page);
 #endif
 
-	if (PageWriteback(vmf->page) &&
-	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
+	if (FolioWriteback(folio) &&
+	    wait_on_folio_bit_killable(folio, PG_writeback) < 0)
 		return VM_FAULT_RETRY;
 
-	if (lock_page_killable(vmf->page) < 0)
+	if (lock_folio_killable(folio) < 0)
 		return VM_FAULT_RETRY;
 
 	/* We mustn't change page->private until writeback is complete as that
 	 * details the portion of the page we need to write back and we might
 	 * need to redirty the page if there's a problem.
 	 */
-	wait_on_page_writeback(vmf->page);
+	wait_on_folio_writeback(folio);
 
-	priv = afs_page_dirty(0, PAGE_SIZE);
+	priv = afs_page_dirty(0, folio_size(folio));
 	priv = afs_page_dirty_mmapped(priv);
 	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
-			     vmf->page->index, priv);
-	if (PagePrivate(vmf->page))
-		set_page_private(vmf->page, priv);
+			     folio->page.index, priv);
+	if (FolioPrivate(folio))
+		set_folio_private(folio, priv);
 	else
-		attach_page_private(vmf->page, (void *)priv);
+		attach_folio_private(folio, (void *)priv);
 	file_update_time(file);
 
 	sb_end_pagefault(inode->i_sb);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e7ca8def2f0d..8737eb86602e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -725,11 +725,11 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
 }
 
 /*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for wait_on_folio_locked/wait_on_folio_writeback, etc.,
  * and should not be used directly.
  */
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+extern void wait_on_folio_bit(struct folio *folio, int bit_nr);
+extern int wait_on_folio_bit_killable(struct folio *folio, int bit_nr);
 
 /* 
  * Wait for a folio to be unlocked.
@@ -741,14 +741,14 @@ extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
 static inline void wait_on_folio_locked(struct folio *folio)
 {
 	if (FolioLocked(folio))
-		wait_on_page_bit(&folio->page, PG_locked);
+		wait_on_folio_bit(folio, PG_locked);
 }
 
 static inline int wait_on_folio_locked_killable(struct folio *folio)
 {
 	if (!FolioLocked(folio))
 		return 0;
-	return wait_on_page_bit_killable(&folio->page, PG_locked);
+	return wait_on_folio_bit_killable(folio, PG_locked);
 }
 
 static inline void wait_on_page_locked(struct page *page)
diff --git a/mm/filemap.c b/mm/filemap.c
index 50263fa62574..ec5a743b4e0f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1075,7 +1075,7 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 	 *
 	 * So update the flags atomically, and wake up the waiter
 	 * afterwards to avoid any races. This store-release pairs
-	 * with the load-acquire in wait_on_page_bit_common().
+	 * with the load-acquire in wait_on_folio_bit_common().
 	 */
 	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
 	wake_up_state(wait->private, mode);
@@ -1156,7 +1156,7 @@ static void wake_up_folio(struct folio *folio, int bit)
 }
 
 /*
- * A choice of three behaviors for wait_on_page_bit_common():
+ * A choice of three behaviors for wait_on_folio_bit_common():
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
@@ -1190,9 +1190,10 @@ static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
 /* How many times do we accept lock stealing from under a waiter? */
 int sysctl_page_lock_unfairness = 5;
 
-static inline int wait_on_page_bit_common(wait_queue_head_t *q,
-	struct page *page, int bit_nr, int state, enum behavior behavior)
+static inline int wait_on_folio_bit_common(struct folio *folio, int bit_nr,
+		int state, enum behavior behavior)
 {
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
 	int unfairness = sysctl_page_lock_unfairness;
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
@@ -1201,8 +1202,8 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	unsigned long pflags;
 
 	if (bit_nr == PG_locked &&
-	    !PageUptodate(page) && PageWorkingset(page)) {
-		if (!PageSwapBacked(page)) {
+	    !FolioUptodate(folio) && FolioWorkingset(folio)) {
+		if (!FolioSwapBacked(folio)) {
 			delayacct_thrashing_start();
 			delayacct = true;
 		}
@@ -1212,7 +1213,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
 	init_wait(wait);
 	wait->func = wake_page_function;
-	wait_page.page = page;
+	wait_page.page = &folio->page;
 	wait_page.bit_nr = bit_nr;
 
 repeat:
@@ -1227,7 +1228,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * Do one last check whether we can get the
 	 * page bit synchronously.
 	 *
-	 * Do the SetPageWaiters() marking before that
+	 * Do the SetFolioWaiters() marking before that
 	 * to let any waker we _just_ missed know they
 	 * need to wake us up (otherwise they'll never
 	 * even go to the slow case that looks at the
@@ -1238,8 +1239,8 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * lock to avoid races.
 	 */
 	spin_lock_irq(&q->lock);
-	SetPageWaiters(page);
-	if (!trylock_page_bit_common(page, bit_nr, wait))
+	SetFolioWaiters(folio);
+	if (!trylock_page_bit_common(&folio->page, bit_nr, wait))
 		__add_wait_queue_entry_tail(q, wait);
 	spin_unlock_irq(&q->lock);
 
@@ -1249,10 +1250,10 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * see whether the page bit testing has already
 	 * been done by the wake function.
 	 *
-	 * We can drop our reference to the page.
+	 * We can drop our reference to the folio.
 	 */
 	if (behavior == DROP)
-		put_page(page);
+		put_folio(folio);
 
 	/*
 	 * Note that until the "finish_wait()", or until
@@ -1289,7 +1290,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 		 *
 		 * And if that fails, we'll have to retry this all.
 		 */
-		if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
+		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio))))
 			goto repeat;
 
 		wait->flags |= WQ_FLAG_DONE;
@@ -1298,7 +1299,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
 	/*
 	 * If a signal happened, this 'finish_wait()' may remove the last
-	 * waiter from the wait-queues, but the PageWaiters bit will remain
+	 * waiter from the wait-queues, but the FolioWaiters bit will remain
 	 * set. That's ok. The next wakeup will take care of it, and trying
 	 * to do it here would be difficult and prone to races.
 	 */
@@ -1329,19 +1330,17 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_folio_bit(struct folio *folio, int bit_nr)
 {
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
+	wait_on_folio_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
 }
-EXPORT_SYMBOL(wait_on_page_bit);
+EXPORT_SYMBOL(wait_on_folio_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+int wait_on_folio_bit_killable(struct folio *folio, int bit_nr)
 {
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
+	return wait_on_folio_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
 }
-EXPORT_SYMBOL(wait_on_page_bit_killable);
+EXPORT_SYMBOL(wait_on_folio_bit_killable);
 
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
@@ -1358,11 +1357,8 @@ EXPORT_SYMBOL(wait_on_page_bit_killable);
  */
 int put_and_wait_on_page_locked(struct page *page, int state)
 {
-	wait_queue_head_t *q;
-
-	page = compound_head(page);
-	q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, state, DROP);
+	return wait_on_folio_bit_common(page_folio(page), PG_locked, state,
+			DROP);
 }
 
 /**
@@ -1493,16 +1489,14 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_folio(struct folio *folio)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
-	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_on_folio_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
 EXPORT_SYMBOL(__lock_folio);
 
 int __lock_folio_killable(struct folio *folio)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
-	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
+	return wait_on_folio_bit_common(folio, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
 EXPORT_SYMBOL_GPL(__lock_folio_killable);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eef36edc9e0c..6c1b4737c383 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2835,7 +2835,7 @@ void wait_on_folio_writeback(struct folio *folio)
 {
 	while (FolioWriteback(folio)) {
 		trace_wait_on_page_writeback(&folio->page, folio_mapping(folio));
-		wait_on_page_bit(&folio->page, PG_writeback);
+		wait_on_folio_bit(folio, PG_writeback);
 	}
 }
 EXPORT_SYMBOL_GPL(wait_on_folio_writeback);
-- 
2.30.0


