From: Zi Yan <zi.yan@sent.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Yang Shi <yang.shi@linux.alibaba.com>,
Keith Busch <keith.busch@intel.com>,
Fengguang Wu <fengguang.wu@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>,
Michal Hocko <mhocko@kernel.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@suse.cz>,
Mel Gorman <mgorman@techsingularity.net>,
John Hubbard <jhubbard@nvidia.com>,
Mark Hairgrove <mhairgrove@nvidia.com>,
Nitin Gupta <nigupta@nvidia.com>,
Javier Cabezas <jcabezas@nvidia.com>,
David Nellans <dnellans@nvidia.com>, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 07/25] mm: migrate: Add copy_page_dma to use DMA Engine to copy pages.
Date: Wed, 3 Apr 2019 19:00:28 -0700
Message-ID: <20190404020046.32741-8-zi.yan@sent.com>
In-Reply-To: <20190404020046.32741-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
Add copy_page_dma() to copy pages with the DMA engine instead of the
CPU. Two new sysctls control which DMA channels are used:

  vm.use_all_dma_chans: writing 1 (echo 1 > /proc/sys/vm/use_all_dma_chans)
    grabs all usable DMA_MEMCPY channels, up to 16; writing 0 releases
    them again.
  vm.limit_dma_chans: caps how many of the grabbed channels are used
    for a single copy.

When no channels are held (the default), copy_page_dma() requests one
channel on demand and issues a single memcpy descriptor for the whole
copy. When channels are held, the copy is split evenly across
min(available, vm.limit_dma_chans) channels, rounded down to a power of
two; a single page is split by offset, and a multi-page (THP) copy is
split by page range and must divide evenly across the channels. For
example, with 16 channels held and vm.limit_dma_chans = 6, four
channels are used and a 512-page THP copy is issued as four 128-page
chunks.
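
A minimal sketch of the intended call pattern is below. The
copy_huge_page_accel() helper is hypothetical and shown only for
illustration; the actual wiring into the migration path comes in a
later patch of this series:

	/*
	 * Hypothetical caller sketch: try the DMA engine first and fall
	 * back to the existing CPU copy path on any failure.
	 */
	static void copy_huge_page_accel(struct page *dst,
					 struct page *src, int nr_pages)
	{
		/* copy_page_dma() returns 0 on success, negative on error */
		if (copy_page_dma(dst, src, nr_pages))
			copy_page_multithread(dst, src, nr_pages);
	}
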
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/highmem.h | 1 +
include/linux/sched/sysctl.h | 3 +
kernel/sysctl.c | 19 +++
mm/copy_page.c | 291 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 314 insertions(+)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 0f50dc5..119bb39 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -277,5 +277,6 @@ static inline void copy_highpage(struct page *to, struct page *from)
#endif
int copy_page_multithread(struct page *to, struct page *from, int nr_pages);
+int copy_page_dma(struct page *to, struct page *from, int nr_pages);
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 99ce6d7..ce11241 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -90,4 +90,7 @@ extern int sched_energy_aware_handler(struct ctl_table *table, int write,
loff_t *ppos);
#endif
+extern int sysctl_dma_page_migration(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
#endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0eae0b8..b8712eb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,8 @@
extern int accel_page_copy;
extern unsigned int limit_mt_num;
+extern int use_all_dma_chans;
+extern int limit_dma_chans;
/* External variables not in a header file. */
extern int suid_dumpable;
@@ -1451,6 +1453,23 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "use_all_dma_chans",
+ .data = &use_all_dma_chans,
+ .maxlen = sizeof(use_all_dma_chans),
+ .mode = 0644,
+ .proc_handler = sysctl_dma_page_migration,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "limit_dma_chans",
+ .data = &limit_dma_chans,
+ .maxlen = sizeof(limit_dma_chans),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ },
+ {
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
.maxlen = sizeof(gid_t),
diff --git a/mm/copy_page.c b/mm/copy_page.c
index 6665e3d..5e7a797 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -126,3 +126,294 @@ int copy_page_multithread(struct page *to, struct page *from, int nr_pages)
return err;
}
+/* ======================== DMA copy page ======================== */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define NUM_AVAIL_DMA_CHAN 16
+
+
+int use_all_dma_chans;
+int limit_dma_chans = NUM_AVAIL_DMA_CHAN;
+
+
+struct dma_chan *copy_chan[NUM_AVAIL_DMA_CHAN];
+struct dma_device *copy_dev[NUM_AVAIL_DMA_CHAN];
+
+
+
+#ifdef CONFIG_PROC_SYSCTL
+#include <linux/sysctl.h>
+
+int sysctl_dma_page_migration(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int err = 0;
+ int use_all_dma_chans_prior_val = use_all_dma_chans;
+ dma_cap_mask_t copy_mask;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (err < 0)
+ return err;
+ if (write) {
+ /* Grab all DMA channels */
+ if (use_all_dma_chans_prior_val == 0 && use_all_dma_chans == 1) {
+ int i;
+
+ dma_cap_zero(copy_mask);
+ dma_cap_set(DMA_MEMCPY, copy_mask);
+
+ dmaengine_get();
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (!copy_chan[i]) {
+ copy_chan[i] = dma_request_channel(copy_mask, NULL, NULL);
+ }
+ if (!copy_chan[i]) {
+ pr_err("%s: cannot grab channel: %d\n", __func__, i);
+ continue;
+ }
+
+ copy_dev[i] = copy_chan[i]->device;
+
+ if (!copy_dev[i]) {
+ pr_err("%s: no device: %d\n", __func__, i);
+ continue;
+ }
+ }
+
+		} else if (use_all_dma_chans_prior_val == 1 &&
+			   use_all_dma_chans == 0) {
+			/* Release all DMA channels */
+ int i;
+
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (copy_chan[i]) {
+ dma_release_channel(copy_chan[i]);
+ copy_chan[i] = NULL;
+ copy_dev[i] = NULL;
+ }
+ }
+
+ dmaengine_put();
+ }
+
+ if (err)
+ use_all_dma_chans = use_all_dma_chans_prior_val;
+ }
+ return err;
+}
+
+#endif
+
+static int copy_page_dma_once(struct page *to, struct page *from, int nr_pages)
+{
+	struct dma_chan *copy_chan = NULL;
+ struct dma_device *device = NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = 0;
+ struct dmaengine_unmap_data *unmap = NULL;
+ dma_cap_mask_t mask;
+ int ret_val = 0;
+
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dmaengine_get();
+
+ copy_chan = dma_request_channel(mask, NULL, NULL);
+
+ if (!copy_chan) {
+ pr_err("%s: cannot get a channel\n", __func__);
+ ret_val = -1;
+ goto no_chan;
+ }
+
+ device = copy_chan->device;
+
+ if (!device) {
+ pr_err("%s: cannot get a device\n", __func__);
+ ret_val = -2;
+ goto release;
+ }
+
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+
+ if (!unmap) {
+ pr_err("%s: cannot get unmap data\n", __func__);
+ ret_val = -3;
+ goto release;
+ }
+
+ unmap->to_cnt = 1;
+ unmap->addr[0] = dma_map_page(device->dev, from, 0, PAGE_SIZE*nr_pages,
+ DMA_TO_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dma_map_page(device->dev, to, 0, PAGE_SIZE*nr_pages,
+ DMA_FROM_DEVICE);
+ unmap->len = PAGE_SIZE*nr_pages;
+
+ tx = device->device_prep_dma_memcpy(copy_chan,
+ unmap->addr[1],
+ unmap->addr[0], unmap->len,
+ flags);
+
+ if (!tx) {
+ pr_err("%s: null tx descriptor\n", __func__);
+ ret_val = -4;
+ goto unmap_dma;
+ }
+
+	cookie = dmaengine_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+ pr_err("%s: submission error\n", __func__);
+ ret_val = -5;
+ goto unmap_dma;
+ }
+
+ if (dma_sync_wait(copy_chan, cookie) != DMA_COMPLETE) {
+ pr_err("%s: dma does not complete properly\n", __func__);
+ ret_val = -6;
+ }
+
+unmap_dma:
+ dmaengine_unmap_put(unmap);
+release:
+ if (copy_chan) {
+ dma_release_channel(copy_chan);
+ }
+no_chan:
+ dmaengine_put();
+
+ return ret_val;
+}
+
+static int copy_page_dma_always(struct page *to, struct page *from, int nr_pages)
+{
+ struct dma_async_tx_descriptor *tx[NUM_AVAIL_DMA_CHAN] = {0};
+ dma_cookie_t cookie[NUM_AVAIL_DMA_CHAN];
+ enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+ struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+ int ret_val = 0;
+ int total_available_chans = NUM_AVAIL_DMA_CHAN;
+ int i;
+ size_t page_offset;
+
+	/* use only the leading run of successfully grabbed channels */
+	for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i)
+		if (!copy_chan[i])
+			break;
+	total_available_chans = i;
+	if (total_available_chans != NUM_AVAIL_DMA_CHAN)
+		pr_err("%s: %d channels are missing\n", __func__,
+		       NUM_AVAIL_DMA_CHAN - total_available_chans);
+
+	total_available_chans = min_t(int, total_available_chans, limit_dma_chans);
+	if (total_available_chans < 1)
+		return -1;	/* no usable channel; ilog2(0) is undefined */
+	total_available_chans = 1 << ilog2(total_available_chans); /* round down to 2^x */
+
+ if ((nr_pages != 1) && (nr_pages % total_available_chans != 0))
+ return -5;
+
+ for (i = 0; i < total_available_chans; ++i) {
+ unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev, 2, GFP_NOWAIT);
+ if (!unmap[i]) {
+ pr_err("%s: no unmap data at chan %d\n", __func__, i);
+ ret_val = -3;
+ goto unmap_dma;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (nr_pages == 1) {
+ page_offset = PAGE_SIZE / total_available_chans;
+
+ unmap[i]->to_cnt = 1;
+ unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev, from, page_offset*i,
+ page_offset,
+ DMA_TO_DEVICE);
+ unmap[i]->from_cnt = 1;
+ unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev, to, page_offset*i,
+ page_offset,
+ DMA_FROM_DEVICE);
+ unmap[i]->len = page_offset;
+ } else {
+ page_offset = nr_pages / total_available_chans;
+
+ unmap[i]->to_cnt = 1;
+ unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev,
+ from + page_offset*i,
+ 0,
+ PAGE_SIZE*page_offset,
+ DMA_TO_DEVICE);
+ unmap[i]->from_cnt = 1;
+ unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev,
+ to + page_offset*i,
+ 0,
+ PAGE_SIZE*page_offset,
+ DMA_FROM_DEVICE);
+ unmap[i]->len = PAGE_SIZE*page_offset;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ tx[i] = copy_dev[i]->device_prep_dma_memcpy(copy_chan[i],
+ unmap[i]->addr[1],
+ unmap[i]->addr[0],
+ unmap[i]->len,
+ flags[i]);
+ if (!tx[i]) {
+ pr_err("%s: no tx descriptor at chan %d\n", __func__, i);
+ ret_val = -4;
+ goto unmap_dma;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+		cookie[i] = dmaengine_submit(tx[i]);
+
+ if (dma_submit_error(cookie[i])) {
+ pr_err("%s: submission error at chan %d\n", __func__, i);
+ ret_val = -5;
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(copy_chan[i]);
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (dma_sync_wait(copy_chan[i], cookie[i]) != DMA_COMPLETE) {
+ ret_val = -6;
+ pr_err("%s: dma does not complete at chan %d\n", __func__, i);
+ }
+ }
+
+unmap_dma:
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (unmap[i])
+ dmaengine_unmap_put(unmap[i]);
+ }
+
+ return ret_val;
+}
+
+int copy_page_dma(struct page *to, struct page *from, int nr_pages)
+{
+ BUG_ON(hpage_nr_pages(from) != nr_pages);
+ BUG_ON(hpage_nr_pages(to) != nr_pages);
+
+ if (!use_all_dma_chans) {
+ return copy_page_dma_once(to, from, nr_pages);
+ }
+
+ return copy_page_dma_always(to, from, nr_pages);
+}
--
2.7.4