From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Christoph Hellwig <hch@lst.de>, Jeff Layton <jlayton@kernel.org>
Subject: [PATCH v8 18/31] mm/filemap: Add folio_lock
Date: Fri, 30 Apr 2021 19:07:27 +0100
Message-ID: <20210430180740.2707166-19-willy@infradead.org>
In-Reply-To: <20210430180740.2707166-1-willy@infradead.org>

This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __folio_lock().  This saves one call to
compound_head() per contended call to lock_page().

Saves 362 bytes of text, mostly from improved register allocation and
inlining decisions: __folio_lock() is 59 bytes while __lock_page() was 79.
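
For illustration, a minimal sketch of a caller that already has a folio
and can use the new interface directly (fs_touch_folio() below is a
hypothetical example, not part of this patch):

	/* Locking a folio directly skips the compound_head() lookup
	 * that lock_page() must perform when handed a tail page.
	 */
	static void fs_touch_folio(struct folio *folio)
	{
		folio_lock(folio);	/* folio_trylock(); __folio_lock() on contention */
		/* ... operate on the locked folio ... */
		folio_unlock(folio);
	}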

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/pagemap.h | 24 +++++++++++++++++++-----
 mm/filemap.c            | 29 +++++++++++++++--------------
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9126c0db3a60..ff81be103539 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -714,7 +714,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+void __folio_lock(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -722,13 +722,24 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+static inline bool folio_trylock(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return folio_trylock(page_folio(page));
+}
+
+static inline void folio_lock(struct folio *folio)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*
@@ -736,9 +747,12 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
+	struct folio *folio;
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+
+	folio = page_folio(page);
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 090b303bcd45..6935b068856f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1187,7 +1187,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __folio_lock() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1576,17 +1576,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __folio_lock(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__folio_lock);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1661,10 +1660,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__folio_lock(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2815,7 +2814,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_trylock(folio))
 		return 1;
 
 	/*
@@ -2828,7 +2829,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2840,11 +2841,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__folio_lock(folio);
+
 	return 1;
 }
 
-
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
-- 
2.30.2

