From: Zi Yan <zi.yan@sent.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Yang Shi <yang.shi@linux.alibaba.com>,
Keith Busch <keith.busch@intel.com>,
Fengguang Wu <fengguang.wu@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>,
Michal Hocko <mhocko@kernel.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@suse.cz>,
Mel Gorman <mgorman@techsingularity.net>,
John Hubbard <jhubbard@nvidia.com>,
Mark Hairgrove <mhairgrove@nvidia.com>,
Nitin Gupta <nigupta@nvidia.com>,
Javier Cabezas <jcabezas@nvidia.com>,
David Nellans <dnellans@nvidia.com>, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 10/25] mm: migrate: copy_page_lists_mt() to copy a page list using multi-threads.
Date: Wed, 3 Apr 2019 19:00:31 -0700
Message-ID: <20190404020046.32741-11-zi.yan@sent.com>
In-Reply-To: <20190404020046.32741-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>

This prepares for migrate_page_concur(), which migrates multiple pages
at the same time. copy_page_lists_mt() copies a list of pages using
multiple threads on the destination node: when there are fewer pages
than threads, each page is split into per-thread chunks; otherwise the
pages themselves are divided among the threads.
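
As a rough sketch of the intended use (the array names, BATCH, and the
helper below are hypothetical, not part of this patch), a caller gathers
the already-allocated destination pages and their sources into two
arrays and issues a single call; copy_page_lists_mt() spreads the copy
work over up to limit_mt_num worker threads on the destination node and
waits for all of them to finish before returning:

    /* Sketch only: src[]/dst[], BATCH and the helper are made up. */
    struct page *src[BATCH], *dst[BATCH];
    int nr, err;

    nr = collect_migration_batch(src, dst, BATCH);
    err = copy_page_lists_mt(dst, src, nr);
    if (err)
        pr_warn("multi-threaded page copy failed: %d\n", err);

The real caller, migrate_page_concur(), is added later in this series.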
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/copy_page.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
mm/internal.h | 2 +
2 files changed, 125 insertions(+)
diff --git a/mm/copy_page.c b/mm/copy_page.c
index 84f1c02..d2fd67e 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -126,6 +126,129 @@ int copy_page_multithread(struct page *to, struct page *from, int nr_pages)
return err;
}
+
+int copy_page_lists_mt(struct page **to, struct page **from, int nr_items)
+{
+ int err = 0;
+ unsigned int total_mt_num = limit_mt_num;
+ int to_node = page_to_nid(*to);
+ int i;
+ struct copy_page_info *work_items[NR_CPUS] = {0};
+ const struct cpumask *per_node_cpumask = cpumask_of_node(to_node);
+ int cpu_id_list[NR_CPUS] = {0};
+ int cpu;
+ int max_items_per_thread;
+ int item_idx;
+
+ total_mt_num = min_t(unsigned int, total_mt_num,
+ cpumask_weight(per_node_cpumask));
+
+ if (total_mt_num > num_online_cpus())
+ return -ENODEV;
+
+ /* Each thread gets part of each page, if nr_items < total_mt_num */
+ if (nr_items < total_mt_num)
+ max_items_per_thread = nr_items;
+ else
+ max_items_per_thread = (nr_items / total_mt_num) +
+ ((nr_items % total_mt_num) ? 1 : 0);
+
+ for (cpu = 0; cpu < total_mt_num; ++cpu) {
+ work_items[cpu] = kzalloc(sizeof(struct copy_page_info) +
+ sizeof(struct copy_item)*max_items_per_thread, GFP_KERNEL);
+ if (!work_items[cpu]) {
+ err = -ENOMEM;
+ goto free_work_items;
+ }
+ }
+
+ i = 0;
+ for_each_cpu(cpu, per_node_cpumask) {
+ if (i >= total_mt_num)
+ break;
+ cpu_id_list[i] = cpu;
+ ++i;
+ }
+
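+ /*
+ * Two cases below: with fewer pages than copy threads, each page is
+ * split into total_mt_num equal chunks so every thread copies part of
+ * every page; otherwise whole pages are divided among the threads,
+ * roughly nr_items / total_mt_num pages per thread.
+ */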
+ if (nr_items < total_mt_num) {
+ for (cpu = 0; cpu < total_mt_num; ++cpu) {
+ INIT_WORK((struct work_struct *)work_items[cpu],
+ copy_page_work_queue_thread);
+ work_items[cpu]->num_items = max_items_per_thread;
+ }
+
+ for (item_idx = 0; item_idx < nr_items; ++item_idx) {
+ unsigned long chunk_size = PAGE_SIZE * hpage_nr_pages(from[item_idx]) / total_mt_num;
+ char *vfrom = kmap(from[item_idx]);
+ char *vto = kmap(to[item_idx]);
+ VM_BUG_ON(PAGE_SIZE * hpage_nr_pages(from[item_idx]) % total_mt_num);
+ BUG_ON(hpage_nr_pages(to[item_idx]) !=
+ hpage_nr_pages(from[item_idx]));
+
+ for (cpu = 0; cpu < total_mt_num; ++cpu) {
+ work_items[cpu]->item_list[item_idx].to = vto + chunk_size * cpu;
+ work_items[cpu]->item_list[item_idx].from = vfrom + chunk_size * cpu;
+ work_items[cpu]->item_list[item_idx].chunk_size =
+ chunk_size;
+ }
+ }
+
+ for (cpu = 0; cpu < total_mt_num; ++cpu)
+ queue_work_on(cpu_id_list[cpu],
+ system_highpri_wq,
+ (struct work_struct *)work_items[cpu]);
+ } else {
+ item_idx = 0;
+ for (cpu = 0; cpu < total_mt_num; ++cpu) {
+ int num_xfer_per_thread = nr_items / total_mt_num;
+ int per_cpu_item_idx;
+
+ if (cpu < (nr_items % total_mt_num))
+ num_xfer_per_thread += 1;
+
+ INIT_WORK((struct work_struct *)work_items[cpu],
+ copy_page_work_queue_thread);
+
+ work_items[cpu]->num_items = num_xfer_per_thread;
+ for (per_cpu_item_idx = 0; per_cpu_item_idx < work_items[cpu]->num_items;
+ ++per_cpu_item_idx, ++item_idx) {
+ work_items[cpu]->item_list[per_cpu_item_idx].to = kmap(to[item_idx]);
+ work_items[cpu]->item_list[per_cpu_item_idx].from =
+ kmap(from[item_idx]);
+ work_items[cpu]->item_list[per_cpu_item_idx].chunk_size =
+ PAGE_SIZE * hpage_nr_pages(from[item_idx]);
+
+ BUG_ON(hpage_nr_pages(to[item_idx]) !=
+ hpage_nr_pages(from[item_idx]));
+ }
+
+ queue_work_on(cpu_id_list[cpu],
+ system_highpri_wq,
+ (struct work_struct *)work_items[cpu]);
+ }
+ if (item_idx != nr_items)
+ pr_err("%s: only %d out of %d pages are transferred\n", __func__,
+ item_idx, nr_items);
+ }
+
+ /* Wait until it finishes */
+ for (i = 0; i < total_mt_num; ++i)
+ flush_work((struct work_struct *)work_items[i]);
+
+ for (i = 0; i < nr_items; ++i) {
+ kunmap(to[i]);
+ kunmap(from[i]);
+ }
+
+free_work_items:
+ for (cpu = 0; cpu < total_mt_num; ++cpu)
+ if (work_items[cpu])
+ kfree(work_items[cpu]);
+
+ return err;
+}
/* ======================== DMA copy page ======================== */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
diff --git a/mm/internal.h b/mm/internal.h
index cb1a610..51f5e1b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -558,5 +558,7 @@ extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
extern int copy_page_lists_dma_always(struct page **to,
struct page **from, int nr_pages);
+extern int copy_page_lists_mt(struct page **to,
+ struct page **from, int nr_pages);
#endif /* __MM_INTERNAL_H */
--
2.7.4