From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-cachefs@redhat.com, linux-afs@lists.infradead.org
Subject: [PATCH v5 16/27] mm/filemap: Add lock_folio
Date: Sat, 20 Mar 2021 05:40:53 +0000
Message-ID: <20210320054104.1300774-17-willy@infradead.org>
In-Reply-To: <20210320054104.1300774-1-willy@infradead.org>

This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __lock_folio().  This saves one call to
compound_head() per contended call to lock_page().

This saves 362 bytes of text, mostly from improved register allocation
and inlining decisions.  __lock_folio is 59 bytes while __lock_page was 79.
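
For illustration only, a minimal sketch of how callers might use the new
interface.  frob_folio() and frob_page() are hypothetical functions, not
part of this series; lock_folio(), unlock_folio() and lock_page() are the
interfaces added or converted by this series, declared in
<linux/pagemap.h>:

	/* Caller that already has a folio: no compound_head() lookup. */
	static void frob_folio(struct folio *folio)
	{
		lock_folio(folio);	/* sleeps via __lock_folio() if contended */
		/* ... operate on the locked folio ... */
		unlock_folio(folio);
	}

	/*
	 * Caller that still has only a struct page: lock_page() now
	 * resolves the folio once with page_folio() and locks that.
	 */
	static void frob_page(struct page *page)
	{
		lock_page(page);
		/* ... */
		unlock_page(page);
	}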

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 24 +++++++++++++++++++-----
 mm/filemap.c            | 29 +++++++++++++++--------------
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c211868086e0..c96ba0dfe111 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -693,7 +693,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+void __lock_folio(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -702,13 +702,24 @@ void unlock_page(struct page *page);
 void unlock_folio(struct folio *folio);
 void unlock_page_private_2(struct page *page);
 
+static inline bool trylock_folio(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return trylock_folio(page_folio(page));
+}
+
+static inline void lock_folio(struct folio *folio)
+{
+	might_sleep();
+	if (!trylock_folio(folio))
+		__lock_folio(folio);
 }
 
 /*
@@ -716,9 +727,12 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
+	struct folio *folio;
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+
+	folio = page_folio(page);
+	if (!trylock_folio(folio))
+		__lock_folio(folio);
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 47ac8126a12e..99c05e2c0eea 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1187,7 +1187,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __lock_folio() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1535,17 +1535,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __lock_folio - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __lock_folio(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__lock_folio);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1620,10 +1619,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__lock_folio(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2767,7 +2766,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (trylock_folio(folio))
 		return 1;
 
 	/*
@@ -2780,7 +2781,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2792,11 +2793,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__lock_folio(folio);
+
 	return 1;
 }
 
-
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
-- 
2.30.2


Thread overview: 58+ messages
2021-03-20  5:40 [PATCH v5 00/27] Memory Folios Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 01/27] fs/cachefiles: Remove wait_bit_key layout dependency Matthew Wilcox (Oracle)
2021-03-22  8:06   ` Christoph Hellwig
2021-03-20  5:40 ` [PATCH v5 02/27] mm/writeback: Add wait_on_page_writeback_killable Matthew Wilcox (Oracle)
2021-03-22  8:07   ` Christoph Hellwig
2021-03-20  5:40 ` [PATCH v5 03/27] afs: Use wait_on_page_writeback_killable Matthew Wilcox (Oracle)
2021-03-22  8:08   ` Christoph Hellwig
2021-03-20  5:40 ` [PATCH v5 04/27] mm: Introduce struct folio Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 05/27] mm: Add folio_pgdat and folio_zone Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 06/27] mm/vmstat: Add functions to account folio statistics Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 07/27] mm/debug: Add VM_BUG_ON_FOLIO and VM_WARN_ON_ONCE_FOLIO Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 08/27] mm: Add put_folio Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 09/27] mm: Add get_folio Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 10/27] mm: Create FolioFlags Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 11/27] mm: Handle per-folio private data Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 12/27] mm: Add folio_index, folio_file_page and folio_contains Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 13/27] mm/util: Add folio_mapping and folio_file_mapping Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 14/27] mm/memcg: Add folio wrappers for various functions Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 15/27] mm/filemap: Add unlock_folio Matthew Wilcox (Oracle)
2021-03-20  5:40 ` Matthew Wilcox (Oracle) [this message]
2021-03-20  5:40 ` [PATCH v5 17/27] mm/filemap: Add lock_folio_killable Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 18/27] mm/filemap: Add __lock_folio_async Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 19/27] mm/filemap: Add __lock_folio_or_retry Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 20/27] mm/filemap: Add wait_on_folio_locked Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 21/27] mm/filemap: Add end_folio_writeback Matthew Wilcox (Oracle)
2021-03-20  5:40 ` [PATCH v5 22/27] mm/writeback: Add wait_on_folio_writeback Matthew Wilcox (Oracle)
2021-03-20  5:41 ` [PATCH v5 23/27] mm/writeback: Add wait_for_stable_folio Matthew Wilcox (Oracle)
2021-03-20  5:41 ` [PATCH v5 24/27] mm/filemap: Convert wait_on_page_bit to wait_on_folio_bit Matthew Wilcox (Oracle)
2021-03-21  7:10   ` kernel test robot
2021-03-21  7:10     ` kernel test robot
2021-03-20  5:41 ` [PATCH v5 25/27] mm/filemap: Convert wake_up_page_bit to wake_up_folio_bit Matthew Wilcox (Oracle)
2021-03-20  5:41 ` [PATCH v5 26/27] mm/filemap: Convert page wait queues to be folios Matthew Wilcox (Oracle)
2021-03-20  7:54   ` kernel test robot
2021-03-20  7:54     ` kernel test robot
2021-03-20  5:41 ` [PATCH v5 27/27] mm/doc: Build kerneldoc for various mm files Matthew Wilcox (Oracle)
2021-03-22  3:25 ` [PATCH v5 00/27] Memory Folios Matthew Wilcox
2021-03-22  9:25 ` [PATCH v5 01/27] fs/cachefiles: Remove wait_bit_key layout dependency David Howells
2021-03-22  9:26 ` [PATCH v5 02/27] mm/writeback: Add wait_on_page_writeback_killable David Howells
2021-03-22  9:27 ` [PATCH v5 03/27] afs: Use wait_on_page_writeback_killable David Howells
2021-03-22 19:41   ` Matthew Wilcox
2021-03-22 17:59 ` [PATCH v5 00/27] Memory Folios Johannes Weiner
2021-03-22 18:47   ` Matthew Wilcox
2021-03-24  0:29     ` Johannes Weiner
2021-03-24  6:24       ` Matthew Wilcox
2021-03-26 17:48         ` Johannes Weiner
2021-03-29 16:58           ` Matthew Wilcox
2021-03-29 17:56             ` Matthew Wilcox
2021-03-30 19:30             ` Johannes Weiner
2021-03-30 21:09               ` Matthew Wilcox
2021-03-31 18:14                 ` Johannes Weiner
2021-03-31 18:28                   ` Matthew Wilcox
2021-04-01  5:05                 ` Al Viro
2021-04-01 12:07                   ` Matthew Wilcox
2021-04-01 16:00                   ` Johannes Weiner
2021-03-31 14:54               ` Christoph Hellwig
2021-03-23 15:50   ` Christoph Hellwig
2021-03-23 11:29 ` [PATCH v5 03/27] afs: Use wait_on_page_writeback_killable David Howells
2021-03-23 17:50 ` [PATCH v5 00/27] Memory Folios David Howells
