From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH 21/21] mm/shmem: Convert shmem_swapin_page() to shmem_swapin_folio()
Date: Fri, 29 Apr 2022 20:23:29 +0100	[thread overview]
Message-ID: <20220429192329.3034378-22-willy@infradead.org> (raw)
In-Reply-To: <20220429192329.3034378-1-willy@infradead.org>

shmem_swapin_page() only brings in order-0 pages, which are folios
by definition.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/arm64/include/asm/pgtable.h |   6 +-
 include/linux/pgtable.h          |   2 +-
 mm/shmem.c                       | 108 ++++++++++++++-----------------
 3 files changed, 54 insertions(+), 62 deletions(-)
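
As a sketch of the identity the conversion relies on (illustrative only,
not part of the change itself): an order-0 page is the sole page of its
folio, so page_folio() and &folio->page simply round-trip between two
views of the same memory:

	/* Illustrative sketch; 'page' is any order-0 page. */
	struct folio *folio = page_folio(page);	/* the folio *is* this page */
	struct page *p = &folio->page;		/* and back to the same page */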

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index dff2b483ea50..27cb6a355fb0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -964,10 +964,10 @@ static inline void arch_swap_invalidate_area(int type)
 }
 
 #define __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
-	if (system_supports_mte() && mte_restore_tags(entry, page))
-		set_bit(PG_mte_tagged, &page->flags);
+	if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
+		set_bit(PG_mte_tagged, &folio->flags);
 }
 
 #endif /* CONFIG_ARM64_MTE */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index f4f4077b97aa..a1c44b015463 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -738,7 +738,7 @@ static inline void arch_swap_invalidate_area(int type)
 #endif
 
 #ifndef __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
 }
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 7457f352cf9f..673a0e783496 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -134,8 +134,8 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type);
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1158,69 +1158,64 @@ static void shmem_evict_inode(struct inode *inode)
 }
 
 static int shmem_find_swap_entries(struct address_space *mapping,
-				   pgoff_t start, unsigned int nr_entries,
-				   struct page **entries, pgoff_t *indices,
-				   unsigned int type)
+				   pgoff_t start, struct folio_batch *fbatch,
+				   pgoff_t *indices, unsigned int type)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
-	struct page *page;
+	struct folio *folio;
 	swp_entry_t entry;
 	unsigned int ret = 0;
 
-	if (!nr_entries)
-		return 0;
-
 	rcu_read_lock();
-	xas_for_each(&xas, page, ULONG_MAX) {
-		if (xas_retry(&xas, page))
+	xas_for_each(&xas, folio, ULONG_MAX) {
+		if (xas_retry(&xas, folio))
 			continue;
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
 
-		entry = radix_to_swp_entry(page);
+		entry = radix_to_swp_entry(folio);
 		if (swp_type(entry) != type)
 			continue;
 
 		indices[ret] = xas.xa_index;
-		entries[ret] = page;
+		if (!folio_batch_add(fbatch, folio))
+			break;
 
 		if (need_resched()) {
 			xas_pause(&xas);
 			cond_resched_rcu();
 		}
-		if (++ret == nr_entries)
-			break;
 	}
 	rcu_read_unlock();
 
-	return ret;
+	return xas.xa_index;
 }
 
 /*
  * Move the swapped pages for an inode to page cache. Returns the count
  * of pages swapped in, or the error in case of failure.
  */
-static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
-				    pgoff_t *indices)
+static int shmem_unuse_swap_entries(struct inode *inode,
+		struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i = 0;
 	int ret = 0;
 	int error = 0;
 	struct address_space *mapping = inode->i_mapping;
 
-	for (i = 0; i < pvec.nr; i++) {
-		struct page *page = pvec.pages[i];
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
-		error = shmem_swapin_page(inode, indices[i],
-					  &page, SGP_CACHE,
+		error = shmem_swapin_folio(inode, indices[i],
+					  &folio, SGP_CACHE,
 					  mapping_gfp_mask(mapping),
 					  NULL, NULL);
 		if (error == 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			ret++;
 		}
 		if (error == -ENOMEM)
@@ -1237,26 +1232,23 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	pgoff_t start = 0;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
 	int ret = 0;
 
-	pagevec_init(&pvec);
 	do {
-		unsigned int nr_entries = PAGEVEC_SIZE;
-
-		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
-						  pvec.pages, indices, type);
-		if (pvec.nr == 0) {
+		folio_batch_init(&fbatch);
+		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
+		if (folio_batch_count(&fbatch) == 0) {
 			ret = 0;
 			break;
 		}
 
-		ret = shmem_unuse_swap_entries(inode, pvec, indices);
+		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
 		if (ret < 0)
 			break;
 
-		start = indices[pvec.nr - 1];
+		start = indices[folio_batch_count(&fbatch) - 1];
 	} while (true);
 
 	return ret;
@@ -1680,22 +1672,22 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  * Returns 0 and the page in pagep if success. On failure, returns the
  * error code and NULL in *pagep.
  */
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page = NULL;
+	struct page *page;
 	struct folio *folio;
 	swp_entry_t swap;
 	int error;
 
-	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
-	swap = radix_to_swp_entry(*pagep);
-	*pagep = NULL;
+	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
+	swap = radix_to_swp_entry(*foliop);
+	*foliop = NULL;
 
 	/* Look it up and read it in.. */
 	page = lookup_swap_cache(swap, NULL, 0);
@@ -1713,27 +1705,28 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 			goto failed;
 		}
 	}
+	folio = page_folio(page);
 
 	/* We have to do this with page locked to prevent races */
-	lock_page(page);
-	if (!PageSwapCache(page) || page_private(page) != swap.val ||
+	folio_lock(folio);
+	if (!folio_test_swapcache(folio) ||
+	    folio_swap_entry(folio).val != swap.val ||
 	    !shmem_confirm_swap(mapping, index, swap)) {
 		error = -EEXIST;
 		goto unlock;
 	}
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		error = -EIO;
 		goto failed;
 	}
-	wait_on_page_writeback(page);
+	folio_wait_writeback(folio);
 
 	/*
 	 * Some architectures may have to restore extra metadata to the
-	 * physical page after reading from swap.
+	 * folio after reading from swap.
 	 */
-	arch_swap_restore(swap, page);
+	arch_swap_restore(swap, folio);
 
-	folio = page_folio(page);
 	if (shmem_should_replace_folio(folio, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
 		if (error)
@@ -1752,21 +1745,21 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	spin_unlock_irq(&info->lock);
 
 	if (sgp == SGP_WRITE)
-		mark_page_accessed(page);
+		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(page);
-	set_page_dirty(page);
+	delete_from_swap_cache(&folio->page);
+	folio_mark_dirty(folio);
 	swap_free(swap);
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 failed:
 	if (!shmem_confirm_swap(mapping, index, swap))
 		error = -EEXIST;
 unlock:
-	if (page) {
-		unlock_page(page);
-		put_page(page);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 
 	return error;
@@ -1820,13 +1813,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	}
 
 	if (xa_is_value(folio)) {
-		struct page *page = &folio->page;
-		error = shmem_swapin_page(inode, index, &page,
+		error = shmem_swapin_folio(inode, index, &folio,
 					  sgp, gfp, vma, fault_type);
 		if (error == -EEXIST)
 			goto repeat;
 
-		*pagep = page;
+		*pagep = &folio->page;
 		return error;
 	}
 
-- 
2.34.1


