From: jglisse@redhat.com
To: akpm@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
"Linus Torvalds" <torvalds@linux-foundation.org>,
joro@8bytes.org, "Mel Gorman" <mgorman@suse.de>,
"H. Peter Anvin" <hpa@zytor.com>,
"Peter Zijlstra" <peterz@infradead.org>,
"Andrea Arcangeli" <aarcange@redhat.com>,
"Johannes Weiner" <jweiner@redhat.com>,
"Larry Woodman" <lwoodman@redhat.com>,
"Rik van Riel" <riel@redhat.com>,
"Dave Airlie" <airlied@redhat.com>,
"Brendan Conoboy" <blc@redhat.com>,
"Joe Donohue" <jdonohue@redhat.com>,
"Duncan Poole" <dpoole@nvidia.com>,
"Sherry Cheung" <SCheung@nvidia.com>,
"Subhash Gutti" <sgutti@nvidia.com>,
"John Hubbard" <jhubbard@nvidia.com>,
"Mark Hairgrove" <mhairgrove@nvidia.com>,
"Lucien Dunning" <ldunning@nvidia.com>,
"Cameron Buschardt" <cabuschardt@nvidia.com>,
"Arvind Gopalakrishnan" <arvindg@nvidia.com>,
"Haggai Eran" <haggaie@mellanox.com>,
"Shachar Raindel" <raindel@mellanox.com>,
"Liran Liss" <liranl@mellanox.com>,
"Roland Dreier" <roland@purestorage.com>,
"Ben Sander" <ben.sander@amd.com>,
"Greg Stoner" <Greg.Stoner@amd.com>,
"John Bridgman" <John.Bridgman@amd.com>,
"Michael Mantor" <Michael.Mantor@amd.com>,
"Paul Blinzer" <Paul.Blinzer@amd.com>,
"Laurent Morichetti" <Laurent.Morichetti@amd.com>,
"Alexander Deucher" <Alexander.Deucher@amd.com>,
"Oded Gabbay" <Oded.Gabbay@amd.com>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Jatin Kumar" <jakumar@nvidia.com>
Subject: [PATCH 25/36] HMM: add helpers for migration back to system memory.
Date: Thu, 21 May 2015 16:23:01 -0400
Message-ID: <1432239792-5002-6-git-send-email-jglisse@redhat.com>
In-Reply-To: <1432239792-5002-1-git-send-email-jglisse@redhat.com>
From: Jérôme Glisse <jglisse@redhat.com>
This patch adds all the functions and helpers necessary for migration
from device memory back to system memory. There are three different
cases that use this code:
- CPU page fault
- fork
- device driver request
Note that this patch uses regular memory accounting, which means that
migration can fail as a result of memory cgroup resource exhaustion.
Later patches will modify memcg to allow remote memory to stay
accounted as regular memory, removing this point of failure.
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Sherry Cheung <SCheung@nvidia.com>
Signed-off-by: Subhash Gutti <sgutti@nvidia.com>
Signed-off-by: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Jatin Kumar <jakumar@nvidia.com>
---
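As an aside (not part of the patch): the core/driver handshake here relies
on the select and valid bits of the dst[] array that hmm_migrate_back()
hands to the driver's copy_from_device() callback. Below is a stand-alone,
user-space sketch of that protocol. The bit values and helper names
(HMM_PTE_SELECT, HMM_PTE_VALID, model_copy_from_device) are illustrative
assumptions, not the real hmm_pte_*() helpers; it only models how a failed
copy is expected to clear the valid bit while leaving the select bit set,
so that the core loop can drop that entry when updating the mirror page
table.

/* Stand-alone model of the dst[] select/valid handshake. The bit layout
 * and helper names are illustrative assumptions, not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define HMM_PTE_SELECT (1ull << 0)	/* entry chosen for migration */
#define HMM_PTE_VALID  (1ull << 1)	/* copy from device succeeded */

/* Driver side: try to copy each selected page back to system memory.
 * On failure, clear the valid bit but keep the select bit set, which is
 * what the core code expects when it walks dst[] afterwards. */
static void model_copy_from_device(uint64_t *dst, unsigned long npages)
{
	for (unsigned long i = 0; i < npages; i++) {
		if (!(dst[i] & HMM_PTE_SELECT))
			continue;			/* nothing to migrate here */
		bool copy_ok = (i % 3 != 0);		/* pretend some copies fail */
		if (copy_ok)
			dst[i] |= HMM_PTE_VALID;
		else
			dst[i] &= ~HMM_PTE_VALID;
	}
}

/* Core side: only entries still select+valid would be written into the
 * mirror page table; select-but-not-valid entries count as failures. */
int main(void)
{
	uint64_t dst[8] = { 0 };

	for (int i = 0; i < 8; i++)
		if (i != 5)				/* entry 5 was never selected */
			dst[i] = HMM_PTE_SELECT | HMM_PTE_VALID;

	model_copy_from_device(dst, 8);

	for (int i = 0; i < 8; i++) {
		if (!(dst[i] & HMM_PTE_SELECT))
			printf("page %d: not selected, left alone\n", i);
		else if (dst[i] & HMM_PTE_VALID)
			printf("page %d: migrated, update mirror page table\n", i);
		else
			printf("page %d: copy failed, entry dropped\n", i);
	}
	return 0;
}
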
mm/hmm.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 157 insertions(+)
diff --git a/mm/hmm.c b/mm/hmm.c
index b8807b2..1208f64 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -50,6 +50,12 @@ static struct mmu_notifier_ops hmm_notifier_ops;
static inline struct hmm_mirror *hmm_mirror_ref(struct hmm_mirror *mirror);
static inline void hmm_mirror_unref(struct hmm_mirror **mirror);
static void hmm_mirror_kill(struct hmm_mirror *mirror);
+static int hmm_mirror_migrate_back(struct hmm_mirror *mirror,
+ struct hmm_event *event,
+ pte_t *new_pte,
+ dma_addr_t *dst,
+ unsigned long start,
+ unsigned long end);
static inline int hmm_mirror_update(struct hmm_mirror *mirror,
struct hmm_event *event,
struct page *page);
@@ -425,6 +431,46 @@ static struct mmu_notifier_ops hmm_notifier_ops = {
};
+static int hmm_migrate_back(struct hmm *hmm,
+ struct hmm_event *event,
+ struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pte_t *new_pte,
+ dma_addr_t *dst,
+ unsigned long start,
+ unsigned long end)
+{
+ struct hmm_mirror *mirror;
+ int r, ret;
+
+ /*
+ * Do not return right away on error, as there might be valid pages we
+ * can migrate.
+ */
+ ret = mm_hmm_migrate_back(mm, vma, new_pte, start, end);
+
+again:
+ down_read(&hmm->rwsem);
+ hlist_for_each_entry(mirror, &hmm->mirrors, mlist) {
+ r = hmm_mirror_migrate_back(mirror, event, new_pte,
+ dst, start, end);
+ if (r) {
+ ret = ret ? ret : r;
+ mirror = hmm_mirror_ref(mirror);
+ BUG_ON(!mirror);
+ up_read(&hmm->rwsem);
+ hmm_mirror_kill(mirror);
+ hmm_mirror_unref(&mirror);
+ goto again;
+ }
+ }
+ up_read(&hmm->rwsem);
+
+ mm_hmm_migrate_back_cleanup(mm, vma, new_pte, dst, start, end);
+
+ return ret;
+}
+
int hmm_handle_cpu_fault(struct mm_struct *mm,
struct vm_area_struct *vma,
pmd_t *pmdp, unsigned long addr,
@@ -1085,6 +1131,117 @@ out:
}
EXPORT_SYMBOL(hmm_mirror_fault);
+static int hmm_mirror_migrate_back(struct hmm_mirror *mirror,
+ struct hmm_event *event,
+ pte_t *new_pte,
+ dma_addr_t *dst,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long addr, i, npages = (end - start) >> PAGE_SHIFT;
+ struct hmm_device *device = mirror->device;
+ struct device *dev = mirror->device->dev;
+ struct hmm_pt_iter iter;
+ int r, ret = 0;
+
+ hmm_pt_iter_init(&iter);
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
+ dma_addr_t *hmm_pte;
+
+ hmm_pte_clear_select(&dst[i]);
+
+ if (!pte_present(new_pte[i]))
+ continue;
+ hmm_pte = hmm_pt_iter_update(&iter, &mirror->pt, addr);
+ if (!hmm_pte)
+ continue;
+
+ if (!hmm_pte_test_valid_dev(hmm_pte))
+ continue;
+
+ dst[i] = hmm_pte_from_pfn(pte_pfn(new_pte[i]));
+ hmm_pte_set_select(&dst[i]);
+ hmm_pte_set_write(&dst[i]);
+ }
+
+ if (device->dev) {
+ ret = hmm_mirror_dma_map_range(mirror, dst, NULL, npages);
+ if (ret) {
+ for (i = 0; i < npages; ++i) {
+ if (!hmm_pte_test_select(&dst[i]))
+ continue;
+ if (hmm_pte_test_valid_dma(&dst[i]))
+ continue;
+ dst[i] = 0;
+ }
+ }
+ }
+
+ r = device->ops->copy_from_device(mirror, event, dst, start, end);
+
+ /* Update mirror page table with successfully migrated entry. */
+ for (addr = start; addr < end;) {
+ unsigned long idx, next, npages;
+ dma_addr_t *hmm_pte;
+
+ hmm_pte = hmm_pt_iter_update(&iter, &mirror->pt, addr);
+ if (!hmm_pte) {
+ addr = hmm_pt_iter_next(&iter, &mirror->pt,
+ addr, end);
+ continue;
+ }
+
+ next = hmm_pt_level_next(&mirror->pt, addr, end,
+ mirror->pt.llevel - 1);
+
+ idx = (addr - event->start) >> PAGE_SHIFT;
+ npages = (next - addr) >> PAGE_SHIFT;
+ hmm_pt_iter_directory_lock(&iter, &mirror->pt);
+ for (i = 0; i < npages; i++, idx++) {
+ if (!hmm_pte_test_valid_pfn(&dst[idx]) &&
+ !hmm_pte_test_valid_dma(&dst[idx])) {
+ if (hmm_pte_test_valid_dev(&hmm_pte[i])) {
+ hmm_pte[i] = 0;
+ hmm_pt_iter_directory_unref(&iter,
+ mirror->pt.llevel);
+ }
+ continue;
+ }
+
+ VM_BUG_ON(!hmm_pte_test_select(&dst[idx]));
+ VM_BUG_ON(!hmm_pte_test_valid_dev(&hmm_pte[i]));
+ hmm_pte[i] = dst[idx];
+ }
+ hmm_pt_iter_directory_unlock(&iter, &mirror->pt);
+
+ /* DMA unmap failed migrate entry. */
+ if (dev) {
+ idx = (addr - event->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++, idx++) {
+ dma_addr_t dma_addr;
+
+ /*
+ * Failed entries have the valid bit cleared but
+ * the select bit remains intact.
+ */
+ if (!hmm_pte_test_select(&dst[idx]) &&
+ !hmm_pte_test_valid_dma(&dst[i]))
+ continue;
+
+ hmm_pte_set_valid_dma(&dst[idx]);
+ dma_addr = hmm_pte_dma_addr(*hmm_pte);
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+
+ addr = next;
+ }
+ hmm_pt_iter_fini(&iter, &mirror->pt);
+
+ return ret ? ret : r;
+}
+
/* hmm_mirror_range_discard() - discard a range of address.
*
* @mirror: The mirror struct.
--
1.9.3