From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Christoph Hellwig <hch@lst.de>,
	Jeff Layton <jlayton@kernel.org>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	William Kucharski <william.kucharski@oracle.com>
Subject: [PATCH v11 19/33] mm/filemap: Add folio_lock()
Date: Mon, 14 Jun 2021 21:14:21 +0100	[thread overview]
Message-ID: <20210614201435.1379188-20-willy@infradead.org> (raw)
In-Reply-To: <20210614201435.1379188-1-willy@infradead.org>

Add folio_lock(), which is like lock_page() but for use by callers who
know they have a folio.  Convert __lock_page() to __folio_lock().  This
saves one call to compound_head() per contended call to lock_page().

The change saves 455 bytes of text, mostly from improved register
allocation and inlining decisions; __folio_lock() is 59 bytes while
__lock_page() was 79.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 include/linux/pagemap.h | 24 +++++++++++++++++++-----
 mm/filemap.c            | 29 +++++++++++++++--------------
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9faf19f4c9f9..e61b237df1c1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -641,7 +641,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+void __folio_lock(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -649,13 +649,24 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+static inline bool folio_trylock(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return folio_trylock(page_folio(page));
+}
+
+static inline void folio_lock(struct folio *folio)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*
@@ -663,9 +674,12 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
+	struct folio *folio;
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+
+	folio = page_folio(page);
+	if (!folio_trylock(folio))
+		__folio_lock(folio);
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 4cbed91d456a..a89227132141 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1187,7 +1187,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __folio_lock() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1578,17 +1578,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __folio_lock(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__folio_lock);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1663,10 +1662,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__folio_lock(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2837,7 +2836,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_trylock(folio))
 		return 1;
 
 	/*
@@ -2850,7 +2851,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2862,11 +2863,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__folio_lock(folio);
+
 	return 1;
 }
 
-
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
-- 
2.30.2

