From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 06/25] mm: Add lock_folio
Date: Wed, 16 Dec 2020 18:23:16 +0000	[thread overview]
Message-ID: <20201216182335.27227-7-willy@infradead.org> (raw)
In-Reply-To: <20201216182335.27227-1-willy@infradead.org>

This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __lock_folio().  This saves one call to
compound_head() per contended call to lock_page().
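
To illustrate the intended calling convention, here is a minimal sketch
(hypothetical callers, not part of this patch) contrasting a path that
already has a folio with a legacy page-based path:

	#include <linux/pagemap.h>

	/* Hypothetical caller that already has a folio: lock it directly,
	 * with no compound_head() lookup on either the lock or unlock side.
	 */
	static void frob_folio(struct folio *folio)
	{
		lock_folio(folio);	/* sleeps in __lock_folio() if contended */
		/* ... operate on the locked folio ... */
		unlock_folio(folio);
	}

	/* Hypothetical legacy caller: lock_page() resolves the folio once
	 * via page_folio() and then takes the same path as above.
	 */
	static void frob_page(struct page *page)
	{
		lock_page(page);
		/* ... */
		unlock_page(page);
	}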

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 21 +++++++++++++++------
 mm/filemap.c            | 29 +++++++++++++++--------------
 2 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 83786e7eeb23..c5fe759872b5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -599,7 +599,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+extern void __lock_folio(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -620,13 +620,24 @@ static inline void unlock_page(struct page *page)
 	return unlock_folio(page_folio(page));
 }
 
+static inline bool trylock_folio(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return trylock_folio(page_folio(page));
+}
+
+static inline void lock_folio(struct folio *folio)
+{
+	might_sleep();
+	if (!trylock_folio(folio))
+		__lock_folio(folio);
 }
 
 /*
@@ -634,9 +645,7 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
-	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+	lock_folio(page_folio(page));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 8af89ecc1452..50fdc03590b3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1160,7 +1160,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __lock_folio() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1523,17 +1523,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __lock_folio - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __lock_folio(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__lock_folio);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1587,10 +1586,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__lock_folio(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2764,7 +2763,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (trylock_folio(folio))
 		return 1;
 
 	/*
@@ -2777,7 +2778,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2789,11 +2790,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__lock_folio(folio);
+
 	return 1;
 }
 
-
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
-- 
2.29.2

Thread overview: 37+ messages
2020-12-16 18:23 [PATCH 00/25] Page folios Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 01/25] mm: Introduce struct folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 02/25] mm: Add put_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 03/25] mm: Add get_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 04/25] mm: Create FolioFlags Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 05/25] mm: Add unlock_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` Matthew Wilcox (Oracle) [this message]
2020-12-16 18:23 ` [PATCH 07/25] mm: Add lock_folio_killable Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 08/25] mm: Add __alloc_folio_node and alloc_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 09/25] mm: Convert __page_cache_alloc to return a folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 10/25] mm/filemap: Convert end_page_writeback to use " Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 11/25] mm: Convert mapping_get_entry to return " Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 12/25] mm: Add mark_folio_accessed Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 13/25] mm: Add filemap_get_folio and find_get_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 14/25] mm/filemap: Add folio_add_to_page_cache Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 15/25] mm/swap: Convert rotate_reclaimable_page to folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 16/25] mm: Add folio_mapping Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 17/25] mm: Rename THP_SUPPORT to MULTI_PAGE_FOLIOS Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 18/25] btrfs: Use readahead_batch_length Matthew Wilcox (Oracle)
2020-12-17  9:15   ` John Hubbard
2020-12-17 12:12     ` Matthew Wilcox
2020-12-17 13:42       ` Matthew Wilcox
2020-12-17 19:36         ` John Hubbard
2020-12-16 18:23 ` [PATCH 19/25] fs: Change page refcount rules for readahead Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 20/25] fs: Change readpage to take a folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 21/25] mm: Convert wait_on_page_bit to wait_on_folio_bit Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 22/25] mm: Add wait_on_folio_locked & wait_on_folio_locked_killable Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 23/25] mm: Add flush_dcache_folio Matthew Wilcox (Oracle)
2020-12-16 20:59   ` kernel test robot
2020-12-16 20:59     ` kernel test robot
2020-12-16 22:01     ` Matthew Wilcox
2020-12-16 22:01       ` Matthew Wilcox
2020-12-16 18:23 ` [PATCH 24/25] mm: Add read_cache_folio and read_mapping_folio Matthew Wilcox (Oracle)
2020-12-16 18:23 ` [PATCH 25/25] fs: Convert vfs_dedupe_file_range_compare to folios Matthew Wilcox (Oracle)
2020-12-17 12:47 ` [PATCH 00/25] Page folios David Hildenbrand
2020-12-17 13:55   ` Matthew Wilcox
2020-12-17 14:35     ` David Hildenbrand
