linux-fsdevel.vger.kernel.org archive mirror
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Christoph Hellwig <hch@lst.de>,
	Jeff Layton <jlayton@kernel.org>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	William Kucharski <william.kucharski@oracle.com>,
	David Howells <dhowells@redhat.com>
Subject: [PATCH v12 30/33] mm/filemap: Convert page wait queues to be folios
Date: Tue, 22 Jun 2021 12:41:15 +0100	[thread overview]
Message-ID: <20210622114118.3388190-31-willy@infradead.org> (raw)
In-Reply-To: <20210622114118.3388190-1-willy@infradead.org>

Reinforce that page flags are actually in the head page by changing the
type from page to folio.  This increases the size of cachefiles by two
bytes, but the kernel core is unchanged in size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
---
 fs/cachefiles/rdwr.c    | 16 ++++++++--------
 include/linux/pagemap.h |  8 ++++----
 mm/filemap.c            | 38 +++++++++++++++++++-------------------
 3 files changed, 31 insertions(+), 31 deletions(-)
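
For readers tracking the rename, here is a minimal sketch (not part of
this patch) of how a hypothetical caller would install a custom waiter
after this change; my_install_monitor and my_wake_function are
illustrative names modelled on the cachefiles conversion below, not
functions from the tree:

#include <linux/pagemap.h>
#include <linux/wait.h>

/* Hypothetical wake callback; a sketch of its body follows the patch. */
static int my_wake_function(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *key);

static void my_install_monitor(struct page *backpage,
			       wait_queue_entry_t *waiter)
{
	struct folio *folio = page_folio(backpage);

	init_waitqueue_func_entry(waiter, my_wake_function);
	waiter->private = folio;
	/* Waiters are now registered against the folio (head page). */
	folio_add_wait_queue(folio, waiter);
}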

diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 8ffc40e84a59..e211a3d5ba44 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -25,20 +25,20 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 	struct cachefiles_object *object;
 	struct fscache_retrieval *op = monitor->op;
 	struct wait_page_key *key = _key;
-	struct page *page = wait->private;
+	struct folio *folio = wait->private;
 
 	ASSERT(key);
 
 	_enter("{%lu},%u,%d,{%p,%u}",
 	       monitor->netfs_page->index, mode, sync,
-	       key->page, key->bit_nr);
+	       key->folio, key->bit_nr);
 
-	if (key->page != page || key->bit_nr != PG_locked)
+	if (key->folio != folio || key->bit_nr != PG_locked)
 		return 0;
 
-	_debug("--- monitor %p %lx ---", page, page->flags);
+	_debug("--- monitor %p %lx ---", folio, folio->flags);
 
-	if (!PageUptodate(page) && !PageError(page)) {
+	if (!folio_uptodate(folio) && !folio_error(folio)) {
 		/* unlocked, not uptodate and not erronous? */
 		_debug("page probably truncated");
 	}
@@ -107,7 +107,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
 	put_page(backpage2);
 
 	INIT_LIST_HEAD(&monitor->op_link);
-	add_page_wait_queue(backpage, &monitor->monitor);
+	folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
 
 	if (trylock_page(backpage)) {
 		ret = -EIO;
@@ -294,7 +294,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
 	get_page(backpage);
 	monitor->back_page = backpage;
 	monitor->monitor.private = backpage;
-	add_page_wait_queue(backpage, &monitor->monitor);
+	folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
 	monitor = NULL;
 
 	/* but the page may have been read before the monitor was installed, so
@@ -548,7 +548,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		get_page(backpage);
 		monitor->back_page = backpage;
 		monitor->monitor.private = backpage;
-		add_page_wait_queue(backpage, &monitor->monitor);
+		folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
 		monitor = NULL;
 
 		/* but the page may have been read before the monitor was
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34180fca2892..1330d8d0ca26 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -628,13 +628,13 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 struct wait_page_key {
-	struct page *page;
+	struct folio *folio;
 	int bit_nr;
 	int page_match;
 };
 
 struct wait_page_queue {
-	struct page *page;
+	struct folio *folio;
 	int bit_nr;
 	wait_queue_entry_t wait;
 };
@@ -642,7 +642,7 @@ struct wait_page_queue {
 static inline bool wake_page_match(struct wait_page_queue *wait_page,
 				  struct wait_page_key *key)
 {
-	if (wait_page->page != key->page)
+	if (wait_page->folio != key->folio)
 	       return false;
 	key->page_match = 1;
 
@@ -802,7 +802,7 @@ int wait_on_page_private_2_killable(struct page *page);
 /*
  * Add an arbitrary waiter to a page's wait queue
  */
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
 
 /*
  * Fault everything in given userspace address range in.
diff --git a/mm/filemap.c b/mm/filemap.c
index 64c3be60e1a1..7d8125f6c955 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1019,11 +1019,11 @@ EXPORT_SYMBOL(__page_cache_alloc);
  */
 #define PAGE_WAIT_TABLE_BITS 8
 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
-static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
+static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
 
-static wait_queue_head_t *page_waitqueue(struct page *page)
+static wait_queue_head_t *folio_waitqueue(struct folio *folio)
 {
-	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
+	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
 }
 
 void __init pagecache_init(void)
@@ -1031,7 +1031,7 @@ void __init pagecache_init(void)
 	int i;
 
 	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
-		init_waitqueue_head(&page_wait_table[i]);
+		init_waitqueue_head(&folio_wait_table[i]);
 
 	page_writeback_init();
 }
@@ -1086,10 +1086,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 	 */
 	flags = wait->flags;
 	if (flags & WQ_FLAG_EXCLUSIVE) {
-		if (test_bit(key->bit_nr, &key->page->flags))
+		if (test_bit(key->bit_nr, &key->folio->flags))
 			return -1;
 		if (flags & WQ_FLAG_CUSTOM) {
-			if (test_and_set_bit(key->bit_nr, &key->page->flags))
+			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
 				return -1;
 			flags |= WQ_FLAG_DONE;
 		}
@@ -1123,12 +1123,12 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 
 static void folio_wake_bit(struct folio *folio, int bit_nr)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_queue_head_t *q = folio_waitqueue(folio);
 	struct wait_page_key key;
 	unsigned long flags;
 	wait_queue_entry_t bookmark;
 
-	key.page = &folio->page;
+	key.folio = folio;
 	key.bit_nr = bit_nr;
 	key.page_match = 0;
 
@@ -1220,7 +1220,7 @@ int sysctl_page_lock_unfairness = 5;
 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 		int state, enum behavior behavior)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_queue_head_t *q = folio_waitqueue(folio);
 	int unfairness = sysctl_page_lock_unfairness;
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
@@ -1240,7 +1240,7 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 
 	init_wait(wait);
 	wait->func = wake_page_function;
-	wait_page.page = &folio->page;
+	wait_page.folio = folio;
 	wait_page.bit_nr = bit_nr;
 
 repeat:
@@ -1389,23 +1389,23 @@ int put_and_wait_on_page_locked(struct page *page, int state)
 }
 
 /**
- * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
- * @page: Page defining the wait queue of interest
+ * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
+ * @folio: Folio defining the wait queue of interest
  * @waiter: Waiter to add to the queue
  *
- * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ * Add an arbitrary @waiter to the wait queue for the nominated @folio.
  */
-void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
 {
-	wait_queue_head_t *q = page_waitqueue(page);
+	wait_queue_head_t *q = folio_waitqueue(folio);
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
 	__add_wait_queue_entry_tail(q, waiter);
-	SetPageWaiters(page);
+	folio_set_waiters_flag(folio);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL_GPL(add_page_wait_queue);
+EXPORT_SYMBOL_GPL(folio_add_wait_queue);
 
 #ifndef clear_bit_unlock_is_negative_byte
 
@@ -1595,10 +1595,10 @@ EXPORT_SYMBOL_GPL(__folio_lock_killable);
 
 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 {
-	struct wait_queue_head *q = page_waitqueue(&folio->page);
+	struct wait_queue_head *q = folio_waitqueue(folio);
 	int ret = 0;
 
-	wait->page = &folio->page;
+	wait->folio = folio;
 	wait->bit_nr = PG_locked;
 
 	spin_lock_irq(&q->lock);
-- 
2.30.2
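
As a companion to the wait_page_key change above, here is a minimal
sketch (an illustrative assumption, not code from the patch) of what
such a waiter callback can look like once the key carries a folio;
my_wake_function is the hypothetical name used in the sketch above:

#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/wait.h>

static int my_wake_function(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *_key)
{
	struct wait_page_key *key = _key;
	struct folio *folio = wait->private;	/* set by the installer sketch */

	/* Only react to the lock bit on the folio we registered for. */
	if (key->folio != folio || key->bit_nr != PG_locked)
		return 0;

	/* Detach from the queue; a real caller would hand off work here. */
	list_del_init(&wait->entry);
	return 1;
}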



Thread overview: 42+ messages
2021-06-22 11:40 [PATCH v12 00/33] Memory folios Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 01/33] mm: Convert get_page_unless_zero() to return bool Matthew Wilcox (Oracle)
2021-06-23  7:46   ` Christoph Hellwig
2021-06-28 12:35   ` Kirill A. Shutemov
2021-06-22 11:40 ` [PATCH v12 02/33] mm: Introduce struct folio Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 03/33] mm: Add folio_pgdat(), folio_zone() and folio_zonenum() Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 04/33] mm/vmstat: Add functions to account folio statistics Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 05/33] mm/debug: Add VM_BUG_ON_FOLIO() and VM_WARN_ON_ONCE_FOLIO() Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 06/33] mm: Add folio reference count functions Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 07/33] mm: Add folio_put() Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 08/33] mm: Add folio_get() Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 09/33] mm: Add folio_try_get_rcu() Matthew Wilcox (Oracle)
2021-06-28 12:38   ` Kirill A. Shutemov
2021-06-22 11:40 ` [PATCH v12 10/33] mm: Add folio flag manipulation functions Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 11/33] mm/lru: Add folio LRU functions Matthew Wilcox (Oracle)
2021-06-28 12:40   ` Kirill A. Shutemov
2021-06-22 11:40 ` [PATCH v12 12/33] mm: Handle per-folio private data Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 13/33] mm/filemap: Add folio_index(), folio_file_page() and folio_contains() Matthew Wilcox (Oracle)
2021-06-22 11:40 ` [PATCH v12 14/33] mm/filemap: Add folio_next_index() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 15/33] mm/filemap: Add folio_pos() and folio_file_pos() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 16/33] mm/util: Add folio_mapping() and folio_file_mapping() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 17/33] mm/memcg: Add folio wrappers for various functions Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 18/33] mm/filemap: Add folio_unlock() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 19/33] mm/filemap: Add folio_lock() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 20/33] mm/filemap: Add folio_lock_killable() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 21/33] mm/filemap: Add __folio_lock_async() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 22/33] mm/filemap: Add folio_wait_locked() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 23/33] mm/filemap: Add __folio_lock_or_retry() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 24/33] mm/swap: Add folio_rotate_reclaimable() Matthew Wilcox (Oracle)
2021-06-28 12:46   ` Kirill A. Shutemov
2021-06-22 11:41 ` [PATCH v12 25/33] mm/filemap: Add folio_end_writeback() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 26/33] mm/writeback: Add folio_wait_writeback() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 27/33] mm/writeback: Add folio_wait_stable() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 28/33] mm/filemap: Add folio_wait_bit() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` [PATCH v12 29/33] mm/filemap: Add folio_wake_bit() Matthew Wilcox (Oracle)
2021-06-22 11:41 ` Matthew Wilcox (Oracle) [this message]
2021-06-22 11:41 ` [PATCH v12 31/33] mm/filemap: Add folio private_2 functions Matthew Wilcox (Oracle)
2021-06-28 12:47   ` Kirill A. Shutemov
2021-06-22 11:41 ` [PATCH v12 32/33] fs/netfs: Add folio fscache functions Matthew Wilcox (Oracle)
2021-06-28 12:49   ` Kirill A. Shutemov
2021-06-22 11:41 ` [PATCH v12 33/33] mm: Add folio_mapped() Matthew Wilcox (Oracle)
2021-06-28 12:51   ` Kirill A. Shutemov
