From: Zi Yan <zi.yan@sent.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Yang Shi <yang.shi@linux.alibaba.com>,
Keith Busch <keith.busch@intel.com>,
Fengguang Wu <fengguang.wu@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>,
Michal Hocko <mhocko@kernel.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@suse.cz>,
Mel Gorman <mgorman@techsingularity.net>,
John Hubbard <jhubbard@nvidia.com>,
Mark Hairgrove <mhairgrove@nvidia.com>,
Nitin Gupta <nigupta@nvidia.com>,
Javier Cabezas <jcabezas@nvidia.com>,
David Nellans <dnellans@nvidia.com>, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 09/25] mm: migrate: Add copy_page_lists_dma_always to support copying a list of pages.
Date: Wed, 3 Apr 2019 19:00:30 -0700 [thread overview]
Message-ID: <20190404020046.32741-10-zi.yan@sent.com> (raw)
In-Reply-To: <20190404020046.32741-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
The src and dst page lists must match in page size at each index,
and both lists have the same length (nr_items).
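A minimal usage sketch (illustrative only, not part of this patch;
NR_ITEMS is a hypothetical count, and the fallback shown uses
copy_page_lists_mt(), which is introduced later in this series):

  struct page *src[NR_ITEMS], *dst[NR_ITEMS];
  int err;

  /* ... fill the lists so that dst[i] has the same size as src[i] ... */

  err = copy_page_lists_dma_always(dst, src, NR_ITEMS);
  if (err)
          /* fall back to a CPU-based copy */
          err = copy_page_lists_mt(dst, src, NR_ITEMS);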
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/copy_page.c | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
mm/internal.h | 4 ++
2 files changed, 175 insertions(+)
diff --git a/mm/copy_page.c b/mm/copy_page.c
index 5e7a797..84f1c02 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -417,3 +417,174 @@ int copy_page_dma(struct page *to, struct page *from, int nr_pages)
return copy_page_dma_always(to, from, nr_pages);
}
+
+/*
+ * Use DMA engines to copy a list of pages to new locations.
+ *
+ * The pages are distributed round-robin across the available
+ * DMA channels, one memcpy descriptor per page.
+ */
+int copy_page_lists_dma_always(struct page **to, struct page **from, int nr_items)
+{
+ struct dma_async_tx_descriptor **tx = NULL;
+ dma_cookie_t *cookie = NULL;
+ enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+ struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+ int ret_val = 0;
+ int total_available_chans = NUM_AVAIL_DMA_CHAN;
+ int i;
+ int page_idx;
+
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (!copy_chan[i]) {
+ total_available_chans = i;
+ break;
+ }
+ }
+ if (total_available_chans != NUM_AVAIL_DMA_CHAN) {
+ pr_err("%d channels are missing\n", NUM_AVAIL_DMA_CHAN - total_available_chans);
+ }
+ if (limit_dma_chans < total_available_chans)
+ total_available_chans = limit_dma_chans;
+
+ /* round down to closest 2^x value */
+ total_available_chans = 1<<ilog2(total_available_chans);
+
+ total_available_chans = min_t(int, total_available_chans, nr_items);
+
+ tx = kcalloc(nr_items, sizeof(struct dma_async_tx_descriptor *), GFP_KERNEL);
+ if (!tx) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+ cookie = kcalloc(nr_items, sizeof(dma_cookie_t), GFP_KERNEL);
+ if (!cookie) {
+ ret_val = -ENOMEM;
+ goto out_free_tx;
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ int num_xfer_per_dev = nr_items / total_available_chans;
+
+ if (i < (nr_items % total_available_chans))
+ num_xfer_per_dev += 1;
+
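+ /*
+ * Two mappings (src + dst) per transfer must fit in the largest
+ * preallocated dmaengine unmap pool (256 addresses), hence the 128 cap.
+ */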
+ if (num_xfer_per_dev > 128) {
+ ret_val = -ENOMEM;
+ pr_err("%s: too many pages to be transferred\n", __func__);
+ goto out_free_both;
+ }
+
+ unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev,
+ 2 * num_xfer_per_dev, GFP_NOWAIT);
+ if (!unmap[i]) {
+ pr_err("%s: no unmap data at chan %d\n", __func__, i);
+ ret_val = -ENODEV;
+ goto unmap_dma;
+ }
+ }
+
+ page_idx = 0;
+ for (i = 0; i < total_available_chans; ++i) {
+ int num_xfer_per_dev = nr_items / total_available_chans;
+ int xfer_idx;
+
+ if (i < (nr_items % total_available_chans))
+ num_xfer_per_dev += 1;
+
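+ /* all pages handed to one channel must be the same size */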
+ unmap[i]->to_cnt = num_xfer_per_dev;
+ unmap[i]->from_cnt = num_xfer_per_dev;
+ unmap[i]->len = hpage_nr_pages(from[page_idx]) * PAGE_SIZE;
+
+ for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx, ++page_idx) {
+ size_t page_len = hpage_nr_pages(from[page_idx]) * PAGE_SIZE;
+
+ BUG_ON(page_len != hpage_nr_pages(to[page_idx]) * PAGE_SIZE);
+ BUG_ON(unmap[i]->len != page_len);
+
+ unmap[i]->addr[xfer_idx] =
+ dma_map_page(copy_dev[i]->dev, from[page_idx],
+ 0,
+ page_len,
+ DMA_TO_DEVICE);
+
+ unmap[i]->addr[xfer_idx+num_xfer_per_dev] =
+ dma_map_page(copy_dev[i]->dev, to[page_idx],
+ 0,
+ page_len,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ page_idx = 0;
+ for (i = 0; i < total_available_chans; ++i) {
+ int num_xfer_per_dev = nr_items / total_available_chans;
+ int xfer_idx;
+
+ if (i < (nr_items % total_available_chans))
+ num_xfer_per_dev += 1;
+
+ for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx, ++page_idx) {
+
+ tx[page_idx] = copy_dev[i]->device_prep_dma_memcpy(copy_chan[i],
+ unmap[i]->addr[xfer_idx + num_xfer_per_dev],
+ unmap[i]->addr[xfer_idx],
+ unmap[i]->len,
+ flags[i]);
+ if (!tx[page_idx]) {
+ pr_err("%s: no tx descriptor at chan %d xfer %d\n",
+ __func__, i, xfer_idx);
+ ret_val = -ENODEV;
+ goto unmap_dma;
+ }
+
+ cookie[page_idx] = tx[page_idx]->tx_submit(tx[page_idx]);
+
+ if (dma_submit_error(cookie[page_idx])) {
+ pr_err("%s: submission error at chan %d xfer %d\n",
+ __func__, i, xfer_idx);
+ ret_val = -ENODEV;
+ goto unmap_dma;
+ }
+ }
+
+ dma_async_issue_pending(copy_chan[i]);
+ }
+
+ page_idx = 0;
+ for (i = 0; i < total_available_chans; ++i) {
+ int num_xfer_per_dev = nr_items / total_available_chans;
+ int xfer_idx;
+
+ if (i < (nr_items % total_available_chans))
+ num_xfer_per_dev += 1;
+
+ for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx, ++page_idx) {
+
+ if (dma_sync_wait(copy_chan[i], cookie[page_idx]) != DMA_COMPLETE) {
+ ret_val = -ENXIO;
+ pr_err("%s: dma does not complete at chan %d, xfer %d\n",
+ __func__, i, xfer_idx);
+ }
+ }
+ }
+
+unmap_dma:
+ for (i = 0; i < total_available_chans; ++i) {
+ if (unmap[i])
+ dmaengine_unmap_put(unmap[i]);
+ }
+
+out_free_both:
+ kfree(cookie);
+out_free_tx:
+ kfree(tx);
+out:
+
+ return ret_val;
+}
diff --git a/mm/internal.h b/mm/internal.h
index 9eeaf2b..cb1a610 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -555,4 +555,8 @@ static inline bool is_migrate_highatomic_page(struct page *page)
void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+extern int copy_page_lists_dma_always(struct page **to,
+ struct page **from, int nr_items);
+
#endif /* __MM_INTERNAL_H */
--
2.7.4
Thread overview: 29+ messages
2019-04-04 2:00 [RFC PATCH 00/25] Accelerate page migration and use memcg for PMEM management Zi Yan
2019-04-04 2:00 ` [RFC PATCH 01/25] mm: migrate: Change migrate_mode to support combination migration modes Zi Yan
2019-04-04 2:00 ` [RFC PATCH 02/25] mm: migrate: Add mode parameter to support future page copy routines Zi Yan
2019-04-04 2:00 ` [RFC PATCH 03/25] mm: migrate: Add a multi-threaded page migration function Zi Yan
2019-04-04 2:00 ` [RFC PATCH 04/25] mm: migrate: Add copy_page_multithread into migrate_pages Zi Yan
2019-04-04 2:00 ` [RFC PATCH 05/25] mm: migrate: Add vm.accel_page_copy in sysfs to control page copy acceleration Zi Yan
2019-04-04 2:00 ` [RFC PATCH 06/25] mm: migrate: Make the number of copy threads adjustable via sysctl Zi Yan
2019-04-04 2:00 ` [RFC PATCH 07/25] mm: migrate: Add copy_page_dma to use DMA Engine to copy pages Zi Yan
2019-04-04 2:00 ` [RFC PATCH 08/25] mm: migrate: Add copy_page_dma into migrate_page_copy Zi Yan
2019-04-04 2:00 ` Zi Yan [this message]
2019-04-04 2:00 ` [RFC PATCH 10/25] mm: migrate: copy_page_lists_mt() to copy a page list using multi-threads Zi Yan
2019-04-04 2:00 ` [RFC PATCH 11/25] mm: migrate: Add concurrent page migration into move_pages syscall Zi Yan
2019-04-04 2:00 ` [RFC PATCH 12/25] exchange pages: new page migration mechanism: exchange_pages() Zi Yan
2019-04-04 2:00 ` [RFC PATCH 13/25] exchange pages: add multi-threaded exchange pages Zi Yan
2019-04-04 2:00 ` [RFC PATCH 14/25] exchange pages: concurrent " Zi Yan
2019-04-04 2:00 ` [RFC PATCH 15/25] exchange pages: exchange anonymous page and file-backed page Zi Yan
2019-04-04 2:00 ` [RFC PATCH 16/25] exchange page: Add THP exchange support Zi Yan
2019-04-04 2:00 ` [RFC PATCH 17/25] exchange page: Add exchange_page() syscall Zi Yan
2019-04-04 2:00 ` [RFC PATCH 18/25] memcg: Add per node memory usage&max stats in memcg Zi Yan
2019-04-04 2:00 ` [RFC PATCH 19/25] mempolicy: add MPOL_F_MEMCG flag, enforcing memcg memory limit Zi Yan
2019-04-04 2:00 ` [RFC PATCH 20/25] memory manage: Add memory manage syscall Zi Yan
2019-04-04 2:00 ` [RFC PATCH 21/25] mm: move update_lru_sizes() to mm_inline.h for broader use Zi Yan
2019-04-04 2:00 ` [RFC PATCH 22/25] memory manage: active/inactive page list manipulation in memcg Zi Yan
2019-04-04 2:00 ` [RFC PATCH 23/25] memory manage: page migration based page manipulation between NUMA nodes Zi Yan
2019-04-04 2:00 ` [RFC PATCH 24/25] memory manage: limit migration batch size Zi Yan
2019-04-04 2:00 ` [RFC PATCH 25/25] memory manage: use exchange pages to memory manage to improve throughput Zi Yan
2019-04-04 7:13 ` [RFC PATCH 00/25] Accelerate page migration and use memcg for PMEM management Michal Hocko
2019-04-05 0:32 ` Yang Shi
2019-04-05 17:20 ` Zi Yan