From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v2 18/46] mm/migrate: Add folio_migrate_mapping()
Date: Tue, 22 Jun 2021 13:15:23 +0100
Message-ID: <20210622121551.3398730-19-willy@infradead.org>
In-Reply-To: <20210622121551.3398730-1-willy@infradead.org>

Reimplement migrate_page_move_mapping() as a wrapper around
folio_migrate_mapping().  Saves 193 bytes of kernel text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/migrate.h |  2 +
 mm/folio-compat.c       | 11 ++++++
 mm/migrate.c            | 85 +++++++++++++++++++++--------------------
 3 files changed, 57 insertions(+), 41 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4bb4e519e3f5..a4ff65e9c1e3 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -51,6 +51,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index d229b979b00d..25c2269655f4 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -4,6 +4,7 @@
  * eventually.
  */
 
+#include <linux/migrate.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 
@@ -60,3 +61,13 @@ void mem_cgroup_uncharge(struct page *page)
 	folio_uncharge_cgroup(page_folio(page));
 }
 #endif
+
+#ifdef CONFIG_MIGRATION
+int migrate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page, int extra_count)
+{
+	return folio_migrate_mapping(mapping, page_folio(newpage),
+					page_folio(page), extra_count);
+}
+EXPORT_SYMBOL(migrate_page_move_mapping);
+#endif
diff --git a/mm/migrate.c b/mm/migrate.c
index fff63e139767..b668970acd11 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -355,7 +355,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	if (mapping)
-		expected_count += thp_nr_pages(page) + page_has_private(page);
+		expected_count += compound_nr(page) + page_has_private(page);
 
 	return expected_count;
 }
@@ -368,74 +368,75 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-int migrate_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page, int extra_count)
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
 {
-	XA_STATE(xas, &mapping->i_pages, page_index(page));
+	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = expected_page_refs(mapping, page) + extra_count;
-	int nr = thp_nr_pages(page);
+	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
+	int nr = folio_nr_pages(folio);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
-		if (page_count(page) != expected_count)
+		if (folio_ref_count(folio) != expected_count)
 			return -EAGAIN;
 
 		/* No turning back from here */
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
-		if (PageSwapBacked(page))
-			__SetPageSwapBacked(newpage);
+		newfolio->index = folio->index;
+		newfolio->mapping = folio->mapping;
+		if (folio_swapbacked(folio))
+			__folio_set_swapbacked_flag(newfolio);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
 
-	oldzone = page_zone(page);
-	newzone = page_zone(newpage);
+	oldzone = folio_zone(folio);
+	newzone = folio_zone(newfolio);
 
 	xas_lock_irq(&xas);
-	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+	if (folio_ref_count(folio) != expected_count ||
+	    xas_load(&xas) != folio) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
-	if (!page_ref_freeze(page, expected_count)) {
+	if (!folio_ref_freeze(folio, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	/*
-	 * Now we know that no one else is looking at the page:
+	 * Now we know that no one else is looking at the folio:
 	 * no turning back from here.
 	 */
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
-	page_ref_add(newpage, nr); /* add cache reference */
-	if (PageSwapBacked(page)) {
-		__SetPageSwapBacked(newpage);
-		if (PageSwapCache(page)) {
-			SetPageSwapCache(newpage);
-			set_page_private(newpage, page_private(page));
+	newfolio->index = folio->index;
+	newfolio->mapping = folio->mapping;
+	folio_ref_add(newfolio, nr); /* add cache reference */
+	if (folio_swapbacked(folio)) {
+		__folio_set_swapbacked_flag(newfolio);
+		if (folio_swapcache(folio)) {
+			folio_set_swapcache_flag(newfolio);
+			newfolio->private = folio_get_private(folio);
 		}
 	} else {
-		VM_BUG_ON_PAGE(PageSwapCache(page), page);
+		VM_BUG_ON_FOLIO(folio_swapcache(folio), folio);
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
-	dirty = PageDirty(page);
+	dirty = folio_dirty(folio);
 	if (dirty) {
-		ClearPageDirty(page);
-		SetPageDirty(newpage);
+		folio_clear_dirty_flag(folio);
+		folio_set_dirty_flag(newfolio);
 	}
 
-	xas_store(&xas, newpage);
-	if (PageTransHuge(page)) {
+	xas_store(&xas, newfolio);
+	if (nr > 1) {
 		int i;
 
 		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage);
+			xas_store(&xas, newfolio);
 		}
 	}
 
@@ -444,7 +445,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - nr);
+	folio_ref_unfreeze(folio, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -463,18 +464,18 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct lruvec *old_lruvec, *new_lruvec;
 		struct mem_cgroup *memcg;
 
-		memcg = page_memcg(page);
+		memcg = folio_memcg(folio);
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
-		if (PageSwapBacked(page) && !PageSwapCache(page)) {
+		if (folio_swapbacked(folio) && !folio_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 #ifdef CONFIG_SWAP
-		if (PageSwapCache(page)) {
+		if (folio_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 		}
@@ -490,11 +491,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 	return MIGRATEPAGE_SUCCESS;
 }
-EXPORT_SYMBOL(migrate_page_move_mapping);
+EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
  * The expected number of remaining references is the same as that
- * of migrate_page_move_mapping().
+ * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
@@ -603,7 +604,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
 	if (PageMappedToDisk(page))
 		SetPageMappedToDisk(newpage);
 
-	/* Move dirty on pages not done by migrate_page_move_mapping() */
+	/* Move dirty on pages not done by folio_migrate_mapping() */
 	if (PageDirty(page))
 		SetPageDirty(newpage);
 
@@ -676,11 +677,13 @@ int migrate_page(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		enum migrate_mode mode)
 {
+	struct folio *newfolio = page_folio(newpage);
+	struct folio *folio = page_folio(page);
 	int rc;
 
-	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
+	BUG_ON(folio_writeback(folio));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
+	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -2536,7 +2539,7 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
  * @page: struct page to check
  *
  * Pinned pages cannot be migrated. This is the same test as in
- * migrate_page_move_mapping(), except that here we allow migration of a
+ * folio_migrate_mapping(), except that here we allow migration of a
  * ZONE_DEVICE page.
  */
 static bool migrate_vma_check_page(struct page *page)
-- 
2.30.2

