From: Zi Yan <zi.yan@sent.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Yang Shi <yang.shi@linux.alibaba.com>,
Keith Busch <keith.busch@intel.com>,
Fengguang Wu <fengguang.wu@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>,
Michal Hocko <mhocko@kernel.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@suse.cz>,
Mel Gorman <mgorman@techsingularity.net>,
John Hubbard <jhubbard@nvidia.com>,
Mark Hairgrove <mhairgrove@nvidia.com>,
Nitin Gupta <nigupta@nvidia.com>,
Javier Cabezas <jcabezas@nvidia.com>,
David Nellans <dnellans@nvidia.com>, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 24/25] memory manage: limit migration batch size.
Date: Wed, 3 Apr 2019 19:00:45 -0700
Message-ID: <20190404020046.32741-25-zi.yan@sent.com>
In-Reply-To: <20190404020046.32741-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
Make the migration batch size adjustable via a new vm.migration_batch_size
sysctl to avoid excessive migration overhead when a large number of pages
are under migration. With concurrent migration (MIGRATE_CONCUR), the
isolated page list is now split into batches of at most this many pages;
a batch size of zero or below, or non-concurrent migration, keeps the
previous unlimited behavior. The default batch size is 16 pages.
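An illustrative tuning session (the /proc path and sysctl name follow from
the vm_table entry added below; the values shown are arbitrary examples):

  # cat /proc/sys/vm/migration_batch_size
  16
  # sysctl -w vm.migration_batch_size=64
  vm.migration_batch_size = 64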
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
kernel/sysctl.c | 8 ++++++++
mm/memory_manage.c | 60 ++++++++++++++++++++++++++++++++++++------------------
2 files changed, 48 insertions(+), 20 deletions(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b8712eb..b92e2da9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -105,6 +105,7 @@ extern int accel_page_copy;
extern unsigned int limit_mt_num;
extern int use_all_dma_chans;
extern int limit_dma_chans;
+extern int migration_batch_size;
/* External variables not in a header file. */
extern int suid_dumpable;
@@ -1470,6 +1471,13 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "migration_batch_size",
+ .data = &migration_batch_size,
+ .maxlen = sizeof(migration_batch_size),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
.maxlen = sizeof(gid_t),
diff --git a/mm/memory_manage.c b/mm/memory_manage.c
index d63ad25..8b76fcf 100644
--- a/mm/memory_manage.c
+++ b/mm/memory_manage.c
@@ -16,6 +16,8 @@
#include "internal.h"
+int migration_batch_size = 16;
+
enum isolate_action {
ISOLATE_COLD_PAGES = 1,
ISOLATE_HOT_PAGES,
@@ -137,35 +139,49 @@ static unsigned long isolate_pages_from_lru_list(pg_data_t *pgdat,
}
static int migrate_to_node(struct list_head *page_list, int nid,
- enum migrate_mode mode)
+ enum migrate_mode mode, int batch_size)
{
bool migrate_concur = mode & MIGRATE_CONCUR;
+ bool unlimited_batch_size = (batch_size <= 0 || !migrate_concur);
int num = 0;
- int from_nid;
+ int from_nid = -1;
int err;
if (list_empty(page_list))
return num;
- from_nid = page_to_nid(list_first_entry(page_list, struct page, lru));
+ while (!list_empty(page_list)) {
+ LIST_HEAD(batch_page_list);
+ int i;
- if (migrate_concur)
- err = migrate_pages_concur(page_list, alloc_new_node_page,
- NULL, nid, mode, MR_SYSCALL);
- else
- err = migrate_pages(page_list, alloc_new_node_page,
- NULL, nid, mode, MR_SYSCALL);
+ /* With !migrate_concur the batch size is unlimited, so all pages move to batch_page_list at once */
+ for (i = 0; i < batch_size || unlimited_batch_size; i++) {
+ struct page *item = list_first_entry_or_null(page_list, struct page, lru);
+ if (!item)
+ break;
+ list_move(&item->lru, &batch_page_list);
+ }
- if (err) {
- struct page *page;
+ from_nid = page_to_nid(list_first_entry(&batch_page_list, struct page, lru));
- list_for_each_entry(page, page_list, lru)
- num += hpage_nr_pages(page);
- pr_debug("%d pages failed to migrate from %d to %d\n",
- num, from_nid, nid);
+ if (migrate_concur)
+ err = migrate_pages_concur(&batch_page_list, alloc_new_node_page,
+ NULL, nid, mode, MR_SYSCALL);
+ else
+ err = migrate_pages(&batch_page_list, alloc_new_node_page,
+ NULL, nid, mode, MR_SYSCALL);
- putback_movable_pages(page_list);
+ if (err) {
+ struct page *page;
+
+ list_for_each_entry(page, &batch_page_list, lru)
+ num += hpage_nr_pages(page);
+
+ putback_movable_pages(&batch_page_list);
+ }
}
+ pr_debug("%d pages failed to migrate from %d to %d\n",
+ num, from_nid, nid);
return num;
}
@@ -325,10 +341,12 @@ static int do_mm_manage(struct task_struct *p, struct mm_struct *mm,
/* Migrate pages to slow node */
/* No multi-threaded migration for base pages */
nr_isolated_fast_base_pages -=
- migrate_to_node(&fast_base_page_list, slow_nid, mode & ~MIGRATE_MT);
+ migrate_to_node(&fast_base_page_list, slow_nid,
+ mode & ~MIGRATE_MT, migration_batch_size);
nr_isolated_fast_huge_pages -=
- migrate_to_node(&fast_huge_page_list, slow_nid, mode);
+ migrate_to_node(&fast_huge_page_list, slow_nid, mode,
+ migration_batch_size);
}
if (nr_isolated_fast_base_pages != ULONG_MAX &&
@@ -342,10 +360,12 @@ static int do_mm_manage(struct task_struct *p, struct mm_struct *mm,
/* Migrate pages to fast node */
/* No multi-threaded migration for base pages */
nr_isolated_slow_base_pages -=
- migrate_to_node(&slow_base_page_list, fast_nid, mode & ~MIGRATE_MT);
+ migrate_to_node(&slow_base_page_list, fast_nid, mode & ~MIGRATE_MT,
+ migration_batch_size);
nr_isolated_slow_huge_pages -=
- migrate_to_node(&slow_huge_page_list, fast_nid, mode);
+ migrate_to_node(&slow_huge_page_list, fast_nid, mode,
+ migration_batch_size);
return err;
}
--
2.7.4