From: Pavel Tatashin <pasha.tatashin@soleen.com>
To: pasha.tatashin@soleen.com, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, akpm@linux-foundation.org, vbabka@suse.cz,
	mhocko@suse.com, david@redhat.com, osalvador@suse.de,
	dan.j.williams@intel.com, sashal@kernel.org,
	tyhicks@linux.microsoft.com, iamjoonsoo.kim@lge.com,
	mike.kravetz@oracle.com, rostedt@goodmis.org, mingo@redhat.com,
	jgg@ziepe.ca, peterz@infradead.org, mgorman@suse.de,
	willy@infradead.org, rientjes@google.com, jhubbard@nvidia.com,
	linux-doc@vger.kernel.org, ira.weiny@intel.com,
	linux-kselftest@vger.kernel.org
Subject: [PATCH v4 05/10] mm/gup: migrate pinned pages out of movable zone
Date: Thu, 17 Dec 2020 13:52:38 -0500
Message-ID: <20201217185243.3288048-6-pasha.tatashin@soleen.com>
In-Reply-To: <20201217185243.3288048-1-pasha.tatashin@soleen.com>

We should not pin pages in ZONE_MOVABLE. Currently, only movable CMA
pages are excluded from pinning. Generalize the function that migrates
CMA pages so that it migrates all movable pages, and use
is_pinnable_page() to check which pages need to be migrated.
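
For reference, is_pinnable_page() is introduced earlier in this series and
is not part of this patch. As a rough, illustrative sketch only (not the
exact helper), it is expected to treat a page as non-pinnable when it sits
in ZONE_MOVABLE or on a CMA pageblock:

	/*
	 * Illustrative sketch only; the real helper is defined earlier in
	 * this series. A page is "pinnable" for long-term pins if it is
	 * neither in ZONE_MOVABLE nor on a CMA pageblock.
	 */
	static inline bool is_pinnable_page(struct page *page)
	{
		return !(zone_idx(page_zone(page)) == ZONE_MOVABLE ||
			 is_migrate_cma_page(page));
	}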

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
---
 include/linux/migrate.h        |  1 +
 include/linux/mmzone.h         | 11 ++++--
 include/trace/events/migrate.h |  3 +-
 mm/gup.c                       | 68 ++++++++++++++--------------------
 4 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4594838a0f7c..aae5ef0b3ba1 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -27,6 +27,7 @@ enum migrate_reason {
 	MR_MEMPOLICY_MBIND,
 	MR_NUMA_MISPLACED,
 	MR_CONTIG_RANGE,
+	MR_LONGTERM_PIN,
 	MR_TYPES
 };
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b593316bff3d..25c0c13ba4b1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -386,9 +386,14 @@ enum zone_type {
 	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
 	 * to increase the number of THP/huge pages. Notable special cases are:
 	 *
-	 * 1. Pinned pages: (long-term) pinning of movable pages might
-	 *    essentially turn such pages unmovable. Memory offlining might
-	 *    retry a long time.
+	 * 1. Pinned pages: (long-term) pinning of movable pages is avoided
+	 *    when pages are pinned and faulted, but it is still possible that
+	 *    the address space already has pages in ZONE_MOVABLE at the time
+	 *    the pages are pinned (i.e. the user touched that memory before
+	 *    pinning). In that case we try to migrate them to a different
+	 *    zone, but if migration fails the pages can still end up pinned
+	 *    in ZONE_MOVABLE. Memory offlining might then retry for a long
+	 *    time and will only succeed once the user application unpins them.
 	 * 2. memblock allocations: kernelcore/movablecore setups might create
 	 *    situations where ZONE_MOVABLE contains unmovable allocations
 	 *    after boot. Memory offlining and allocations fail early.
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 4d434398d64d..363b54ce104c 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,8 @@
 	EM( MR_SYSCALL,		"syscall_or_cpuset")		\
 	EM( MR_MEMPOLICY_MBIND,	"mempolicy_mbind")		\
 	EM( MR_NUMA_MISPLACED,	"numa_misplaced")		\
-	EMe(MR_CONTIG_RANGE,	"contig_range")
+	EM( MR_CONTIG_RANGE,	"contig_range")			\
+	EMe(MR_LONGTERM_PIN,	"longterm_pin")
 
 /*
  * First define the enums in the above macros to be exported to userspace
diff --git a/mm/gup.c b/mm/gup.c
index 04602e94856b..591d8e2dfc70 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -89,11 +89,12 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
 		int orig_refs = refs;
 
 		/*
-		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
-		 * path, so fail and let the caller fall back to the slow path.
+		 * Can't take the FOLL_LONGTERM + FOLL_PIN gup fast path if the
+		 * page is not in the right zone, so fail and let the caller
+		 * fall back to the slow path.
 		 */
-		if (unlikely(flags & FOLL_LONGTERM) &&
-				is_migrate_cma_page(page))
+		if (unlikely((flags & FOLL_LONGTERM) &&
+			     !is_pinnable_page(page)))
 			return NULL;
 
 		/*
@@ -1549,19 +1550,18 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-#ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long nr_pages,
+					    struct page **pages,
+					    struct vm_area_struct **vmas,
+					    unsigned int gup_flags)
 {
 	unsigned long i;
 	unsigned long step;
 	bool drain_allow = true;
 	bool migrate_allow = true;
-	LIST_HEAD(cma_page_list);
+	LIST_HEAD(movable_page_list);
 	long ret = nr_pages;
 	struct migration_target_control mtc = {
 		.nid = NUMA_NO_NODE,
@@ -1579,13 +1579,12 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		 */
 		step = compound_nr(head) - (pages[i] - head);
 		/*
-		 * If we get a page from the CMA zone, since we are going to
-		 * be pinning these entries, we might as well move them out
-		 * of the CMA zone if possible.
+		 * If we get a movable page, since we are going to be pinning
+		 * these entries, try to move them out if possible.
 		 */
-		if (is_migrate_cma_page(head)) {
+		if (!is_pinnable_page(head)) {
 			if (PageHuge(head))
-				isolate_huge_page(head, &cma_page_list);
+				isolate_huge_page(head, &movable_page_list);
 			else {
 				if (!PageLRU(head) && drain_allow) {
 					lru_add_drain_all();
@@ -1593,7 +1592,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 				}
 
 				if (!isolate_lru_page(head)) {
-					list_add_tail(&head->lru, &cma_page_list);
+					list_add_tail(&head->lru, &movable_page_list);
 					mod_node_page_state(page_pgdat(head),
 							    NR_ISOLATED_ANON +
 							    page_is_file_lru(head),
@@ -1605,7 +1604,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		i += step;
 	}
 
-	if (!list_empty(&cma_page_list)) {
+	if (!list_empty(&movable_page_list)) {
 		/*
 		 * drop the above get_user_pages reference.
 		 */
@@ -1615,25 +1614,24 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 			for (i = 0; i < nr_pages; i++)
 				put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
-			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+		if (migrate_pages(&movable_page_list, alloc_migration_target, NULL,
+			(unsigned long)&mtc, MIGRATE_SYNC, MR_LONGTERM_PIN)) {
 			/*
 			 * some of the pages failed migration. Do get_user_pages
 			 * without migration.
 			 */
 			migrate_allow = false;
 
-			if (!list_empty(&cma_page_list))
-				putback_movable_pages(&cma_page_list);
+			if (!list_empty(&movable_page_list))
+				putback_movable_pages(&movable_page_list);
 		}
 		/*
 		 * We did migrate all the pages, Try to get the page references
-		 * again migrating any new CMA pages which we failed to isolate
-		 * earlier.
+		 * again migrating any pages which we failed to isolate earlier.
 		 */
 		ret = __get_user_pages_locked(mm, start, nr_pages,
-						   pages, vmas, NULL,
-						   gup_flags);
+					      pages, vmas, NULL,
+					      gup_flags);
 
 		if ((ret > 0) && migrate_allow) {
 			nr_pages = ret;
@@ -1644,17 +1642,6 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 
 	return ret;
 }
-#else
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
-{
-	return nr_pages;
-}
-#endif /* CONFIG_CMA */
 
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1678,8 +1665,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 
 	if (gup_flags & FOLL_LONGTERM) {
 		if (rc > 0)
-			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
-							 vmas, gup_flags);
+			rc = check_and_migrate_movable_pages(mm, start, rc,
+							     pages, vmas,
+							     gup_flags);
 		memalloc_pin_restore(flags);
 	}
 	return rc;
-- 
2.25.1

