From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Christoph Hellwig <hch@lst.de>
Subject: [PATCH v14 10/39] mm/migrate: Add folio_migrate_mapping()
Date: Thu, 15 Jul 2021 21:00:01 +0100	[thread overview]
Message-ID: <20210715200030.899216-11-willy@infradead.org> (raw)
In-Reply-To: <20210715200030.899216-1-willy@infradead.org>

Reimplement migrate_page_move_mapping() as a wrapper around
folio_migrate_mapping().  Saves 193 bytes of kernel text.
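
As an illustration only (a sketch, not part of the diff below; "examplefs"
is a made-up name), a filesystem ->migratepage implementation would use the
new helper the same way the migrate_page() conversion at the end of this
patch does:

	/* Illustrative only: examplefs is hypothetical, not in this series. */
	static int examplefs_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
	{
		struct folio *newfolio = page_folio(newpage);
		struct folio *folio = page_folio(page);
		int rc;

		/* Writeback must be complete before migrating the folio. */
		BUG_ON(folio_test_writeback(folio));

		/* Replace the old folio with the new one in the page cache. */
		rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
		if (rc != MIGRATEPAGE_SUCCESS)
			return rc;

		/* Copy flags and state; this step is still page-based here. */
		migrate_page_states(newpage, page);
		return MIGRATEPAGE_SUCCESS;
	}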

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/migrate.h |  2 +
 mm/folio-compat.c       | 11 ++++++
 mm/migrate.c            | 85 +++++++++++++++++++++--------------------
 3 files changed, 57 insertions(+), 41 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 23dadf7aeba8..eb14495a1f46 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -51,6 +51,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index a374747ae1c6..d883d964fd52 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -4,6 +4,7 @@
  * eventually.
  */
 
+#include <linux/migrate.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 
@@ -48,3 +49,13 @@ void mark_page_accessed(struct page *page)
 	folio_mark_accessed(page_folio(page));
 }
 EXPORT_SYMBOL(mark_page_accessed);
+
+#ifdef CONFIG_MIGRATION
+int migrate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page, int extra_count)
+{
+	return folio_migrate_mapping(mapping, page_folio(newpage),
+					page_folio(page), extra_count);
+}
+EXPORT_SYMBOL(migrate_page_move_mapping);
+#endif
diff --git a/mm/migrate.c b/mm/migrate.c
index 910552318df3..aa4f2310c5bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -363,7 +363,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	if (mapping)
-		expected_count += thp_nr_pages(page) + page_has_private(page);
+		expected_count += compound_nr(page) + page_has_private(page);
 
 	return expected_count;
 }
@@ -376,74 +376,75 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-int migrate_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page, int extra_count)
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
 {
-	XA_STATE(xas, &mapping->i_pages, page_index(page));
+	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = expected_page_refs(mapping, page) + extra_count;
-	int nr = thp_nr_pages(page);
+	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
+	int nr = folio_nr_pages(folio);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
-		if (page_count(page) != expected_count)
+		if (folio_ref_count(folio) != expected_count)
 			return -EAGAIN;
 
 		/* No turning back from here */
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
-		if (PageSwapBacked(page))
-			__SetPageSwapBacked(newpage);
+		newfolio->index = folio->index;
+		newfolio->mapping = folio->mapping;
+		if (folio_test_swapbacked(folio))
+			__folio_set_swapbacked(newfolio);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
 
-	oldzone = page_zone(page);
-	newzone = page_zone(newpage);
+	oldzone = folio_zone(folio);
+	newzone = folio_zone(newfolio);
 
 	xas_lock_irq(&xas);
-	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+	if (folio_ref_count(folio) != expected_count ||
+	    xas_load(&xas) != folio) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
-	if (!page_ref_freeze(page, expected_count)) {
+	if (!folio_ref_freeze(folio, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	/*
-	 * Now we know that no one else is looking at the page:
+	 * Now we know that no one else is looking at the folio:
 	 * no turning back from here.
 	 */
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
-	page_ref_add(newpage, nr); /* add cache reference */
-	if (PageSwapBacked(page)) {
-		__SetPageSwapBacked(newpage);
-		if (PageSwapCache(page)) {
-			SetPageSwapCache(newpage);
-			set_page_private(newpage, page_private(page));
+	newfolio->index = folio->index;
+	newfolio->mapping = folio->mapping;
+	folio_ref_add(newfolio, nr); /* add cache reference */
+	if (folio_test_swapbacked(folio)) {
+		__folio_set_swapbacked(newfolio);
+		if (folio_test_swapcache(folio)) {
+			folio_set_swapcache(newfolio);
+			newfolio->private = folio_get_private(folio);
 		}
 	} else {
-		VM_BUG_ON_PAGE(PageSwapCache(page), page);
+		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
-	dirty = PageDirty(page);
+	dirty = folio_test_dirty(folio);
 	if (dirty) {
-		ClearPageDirty(page);
-		SetPageDirty(newpage);
+		folio_clear_dirty(folio);
+		folio_set_dirty(newfolio);
 	}
 
-	xas_store(&xas, newpage);
-	if (PageTransHuge(page)) {
+	xas_store(&xas, newfolio);
+	if (nr > 1) {
 		int i;
 
 		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage);
+			xas_store(&xas, newfolio);
 		}
 	}
 
@@ -452,7 +453,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - nr);
+	folio_ref_unfreeze(folio, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -471,18 +472,18 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct lruvec *old_lruvec, *new_lruvec;
 		struct mem_cgroup *memcg;
 
-		memcg = page_memcg(page);
+		memcg = folio_memcg(folio);
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
-		if (PageSwapBacked(page) && !PageSwapCache(page)) {
+		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 #ifdef CONFIG_SWAP
-		if (PageSwapCache(page)) {
+		if (folio_test_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 		}
@@ -498,11 +499,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 	return MIGRATEPAGE_SUCCESS;
 }
-EXPORT_SYMBOL(migrate_page_move_mapping);
+EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
  * The expected number of remaining references is the same as that
- * of migrate_page_move_mapping().
+ * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
@@ -563,7 +564,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
 	if (PageMappedToDisk(page))
 		SetPageMappedToDisk(newpage);
 
-	/* Move dirty on pages not done by migrate_page_move_mapping() */
+	/* Move dirty on pages not done by folio_migrate_mapping() */
 	if (PageDirty(page))
 		SetPageDirty(newpage);
 
@@ -639,11 +640,13 @@ int migrate_page(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		enum migrate_mode mode)
 {
+	struct folio *newfolio = page_folio(newpage);
+	struct folio *folio = page_folio(page);
 	int rc;
 
-	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
+	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
+	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -2387,7 +2390,7 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
  * @page: struct page to check
  *
  * Pinned pages cannot be migrated. This is the same test as in
- * migrate_page_move_mapping(), except that here we allow migration of a
+ * folio_migrate_mapping(), except that here we allow migration of a
  * ZONE_DEVICE page.
  */
 static bool migrate_vma_check_page(struct page *page)
-- 
2.30.2



Thread overview: 43+ messages
2021-07-15 19:59 [PATCH v14 00/39] Memory folios: Pagecache edition Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 01/39] mm: Add folio_pfn() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 02/39] mm: Add folio_raw_mapping() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 03/39] mm: Add flush_dcache_folio() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 04/39] mm: Add kmap_local_folio() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 05/39] mm: Add arch_make_folio_accessible() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 06/39] mm: Add folio_young and folio_idle Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 07/39] mm/swap: Add folio_activate() Matthew Wilcox (Oracle)
2021-07-15 19:59 ` [PATCH v14 08/39] mm/swap: Add folio_mark_accessed() Matthew Wilcox (Oracle)
2023-10-08 15:34   ` Gregory Price
2023-10-10 18:08     ` Matthew Wilcox
2023-10-13 16:38       ` Gregory Price
2021-07-15 20:00 ` [PATCH v14 09/39] mm/rmap: Add folio_mkclean() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` Matthew Wilcox (Oracle) [this message]
2021-07-15 20:00 ` [PATCH v14 11/39] mm/migrate: Add folio_migrate_flags() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 12/39] mm/migrate: Add folio_migrate_copy() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 13/39] mm/writeback: Rename __add_wb_stat() to wb_stat_mod() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 14/39] flex_proportions: Allow N events instead of 1 Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 15/39] mm/writeback: Change __wb_writeout_inc() to __wb_writeout_add() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 16/39] mm/writeback: Add __folio_end_writeback() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 17/39] mm/writeback: Add folio_start_writeback() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 18/39] mm/writeback: Add folio_mark_dirty() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 19/39] mm/writeback: Add __folio_mark_dirty() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 20/39] mm/writeback: Convert tracing writeback_page_template to folios Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 21/39] mm/writeback: Add filemap_dirty_folio() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 22/39] mm/writeback: Add folio_account_cleaned() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 23/39] mm/writeback: Add folio_cancel_dirty() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 24/39] mm/writeback: Add folio_clear_dirty_for_io() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 25/39] mm/writeback: Add folio_account_redirty() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 26/39] mm/writeback: Add folio_redirty_for_writepage() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 27/39] mm/filemap: Add i_blocks_per_folio() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 28/39] mm/filemap: Add folio_mkwrite_check_truncate() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 29/39] mm/filemap: Add readahead_folio() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 30/39] mm/workingset: Convert workingset_refault() to take a folio Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 31/39] mm: Add folio_evictable() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 32/39] mm/lru: Convert __pagevec_lru_add_fn to take a folio Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 33/39] mm/lru: Add folio_add_lru() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 34/39] mm/page_alloc: Add folio allocation functions Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 35/39] mm/filemap: Add filemap_alloc_folio Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 36/39] mm/filemap: Add filemap_add_folio() Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 37/39] mm/filemap: Convert mapping_get_entry to return a folio Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 38/39] mm/filemap: Add filemap_get_folio Matthew Wilcox (Oracle)
2021-07-15 20:00 ` [PATCH v14 39/39] mm/filemap: Add FGP_STABLE Matthew Wilcox (Oracle)
