From: js1304@gmail.com
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kernel-team@lge.com, Vlastimil Babka <vbabka@suse.cz>,
	Christoph Hellwig <hch@infradead.org>,
	Roman Gushchin <guro@fb.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Michal Hocko <mhocko@suse.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH v3 5/8] mm/migrate: make a standard migration target allocation function
Date: Tue, 23 Jun 2020 15:13:45 +0900
Message-ID: <1592892828-1934-6-git-send-email-iamjoonsoo.kim@lge.com>
In-Reply-To: <1592892828-1934-1-git-send-email-iamjoonsoo.kim@lge.com>

From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There are several similar functions for migration target allocation. Since
there is no fundamental difference between them, it's better to keep just
one rather than keeping all the variants. This patch implements the base
migration target allocation function, alloc_migration_target(). In the
following patches, the variants will be converted to use this function.

Note that the PageHighMem() call in the previous function is changed to an
open-coded is_highmem_idx() check, since computing the zone index once and
reusing it for both the highmem and ZONE_MOVABLE checks improves
readability.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/migrate.h |  5 +++--
 mm/internal.h           |  7 +++++++
 mm/memory-failure.c     |  8 ++++++--
 mm/memory_hotplug.c     | 14 +++++++++-----
 mm/migrate.c            | 21 +++++++++++++--------
 mm/page_isolation.c     |  8 ++++++--
 6 files changed, 44 insertions(+), 19 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 1d70b4a..5e9c866 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@
 typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
+struct migration_target_control;
+
 /*
  * Return values from addresss_space_operations.migratepage():
  * - negative errno on page migration failure;
@@ -39,8 +41,7 @@ extern int migrate_page(struct address_space *mapping,
 			enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
-extern struct page *new_page_nodemask(struct page *page,
-		int preferred_nid, nodemask_t *nodemask);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
diff --git a/mm/internal.h b/mm/internal.h
index 42cf0b6..f725aa8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -614,4 +614,11 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 
 void setup_zone_pageset(struct zone *zone);
 extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+struct migration_target_control {
+	int nid;		/* preferred node id */
+	nodemask_t *nmask;
+	gfp_t gfp_mask;
+};
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 47b8ccb..820ea5e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1648,9 +1648,13 @@ EXPORT_SYMBOL(unpoison_memory);
 
 static struct page *new_page(struct page *p, unsigned long private)
 {
-	int nid = page_to_nid(p);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(p),
+		.nmask = &node_states[N_MEMORY],
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(p, (unsigned long)&mtc);
 }
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index be3c62e3..d2b65a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1259,19 +1259,23 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 
 static struct page *new_node_page(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.nmask = &nmask,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
 	/*
 	 * try to allocate from a different node but reuse this node if there
 	 * are no other online nodes to be used (e.g. we are offlining a part
 	 * of the only existing node)
 	 */
-	node_clear(nid, nmask);
-	if (nodes_empty(nmask))
-		node_set(nid, nmask);
+	node_clear(mtc.nid, *mtc.nmask);
+	if (nodes_empty(*mtc.nmask))
+		node_set(mtc.nid, *mtc.nmask);
 
-	return new_page_nodemask(page, nid, &nmask);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }
 
 static int
diff --git a/mm/migrate.c b/mm/migrate.c
index 634f1ea..3afff59 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1536,29 +1536,34 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	return rc;
 }
 
-struct page *new_page_nodemask(struct page *page,
-				int preferred_nid, nodemask_t *nodemask)
+struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+	struct migration_target_control *mtc;
+	gfp_t gfp_mask;
 	unsigned int order = 0;
 	struct page *new_page = NULL;
+	int zidx;
+
+	mtc = (struct migration_target_control *)private;
+	gfp_mask = mtc->gfp_mask;
 
 	if (PageHuge(page)) {
 		return alloc_huge_page_nodemask(
-				page_hstate(compound_head(page)),
-				preferred_nid, nodemask, 0, false);
+				page_hstate(compound_head(page)), mtc->nid,
+				mtc->nmask, gfp_mask, false);
 	}
 
 	if (PageTransHuge(page)) {
+		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
 		order = HPAGE_PMD_ORDER;
 	}
-
-	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+	zidx = zone_idx(page_zone(page));
+	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
 	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
+				mtc->nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index aec26d9..adba031 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -309,7 +309,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 
 struct page *alloc_migrate_target(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.nmask = &node_states[N_MEMORY],
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }
-- 
2.7.4


Thread overview: 62+ messages
2020-06-23  6:13 [PATCH v3 0/8] clean-up the migration target allocation functions js1304
2020-06-23  6:13 ` [PATCH v3 1/8] mm/page_isolation: prefer the node of the source page js1304
2020-06-23  6:13 ` [PATCH v3 2/8] mm/migrate: move migration helper from .h to .c js1304
2020-06-23  6:13 ` [PATCH v3 3/8] mm/hugetlb: unify migration callbacks js1304
2020-06-24 21:18   ` Mike Kravetz
2020-06-25 11:26   ` Michal Hocko
2020-06-26  4:02     ` Joonsoo Kim
2020-07-02 16:13       ` Vlastimil Babka
2020-07-03  0:55         ` Joonsoo Kim
2020-06-23  6:13 ` [PATCH v3 4/8] mm/hugetlb: make hugetlb migration callback CMA aware js1304
2020-06-25 11:54   ` Michal Hocko
2020-06-26  4:49     ` Joonsoo Kim
2020-06-26  7:23       ` Michal Hocko
2020-06-29  6:27         ` Joonsoo Kim
2020-06-29  7:55           ` Michal Hocko
2020-06-30  6:30             ` Joonsoo Kim
2020-06-30  6:42               ` Michal Hocko
2020-06-30  7:22                 ` Joonsoo Kim
2020-06-30 16:37                   ` Mike Kravetz
2020-06-23  6:13 ` js1304 [this message]
2020-06-25 12:05   ` [PATCH v3 5/8] mm/migrate: make a standard migration target allocation function Michal Hocko
2020-06-26  5:02     ` Joonsoo Kim
2020-06-26  7:33       ` Michal Hocko
2020-06-29  6:41         ` Joonsoo Kim
2020-06-29  8:03           ` Michal Hocko
2020-06-30  7:19             ` Joonsoo Kim
2020-07-03 15:25   ` Vlastimil Babka
2020-06-23  6:13 ` [PATCH v3 6/8] mm/gup: use a standard migration target allocation callback js1304
2020-06-25 12:08   ` Michal Hocko
2020-06-26  5:03     ` Joonsoo Kim
2020-07-03 15:56   ` Vlastimil Babka
2020-07-06  8:34     ` Joonsoo Kim
2020-06-23  6:13 ` [PATCH v3 7/8] mm/mempolicy: " js1304
2020-06-25 12:09   ` Michal Hocko
2020-07-03 15:59   ` Vlastimil Babka
2020-07-08  1:20   ` Qian Cai
2020-07-08  6:45     ` Michal Hocko
2020-10-08  3:21     ` Hugh Dickins
2020-10-08 17:29       ` Mike Kravetz
2020-10-09  5:50         ` Hugh Dickins
2020-10-09 17:42           ` Mike Kravetz
2020-10-09 22:23             ` Hugh Dickins
2020-10-10  0:25               ` Mike Kravetz
2020-06-23  6:13 ` [PATCH v3 8/8] mm/page_alloc: remove a wrapper for alloc_migration_target() js1304
2020-06-25 12:10   ` Michal Hocko
2020-07-03 16:18   ` Vlastimil Babka
2020-07-06  8:44     ` Joonsoo Kim
