From: Felix Kuehling <Felix.Kuehling@amd.com>
To: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: Philip Yang <Philip.Yang@amd.com>
Subject: [PATCH 07/34] drm/amdkfd: validate svm range system memory
Date: Mon, 5 Apr 2021 21:46:02 -0400
Message-ID: <20210406014629.25141-8-Felix.Kuehling@amd.com>
In-Reply-To: <20210406014629.25141-1-Felix.Kuehling@amd.com>
From: Philip Yang <Philip.Yang@amd.com>
Use HMM to get the addresses of system memory pages, which will then be
used to map the range to GPUs or to migrate it to VRAM.
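
The validation step follows the usual HMM retry pattern. Roughly, as an
illustrative sketch only (in this series the fault and retry steps are
wrapped by the amdgpu_hmm_range_get_pages() and
amdgpu_hmm_range_get_pages_done() helpers added in an earlier patch):

    /* Fault the pages outside the notifier lock; this may sleep. The
     * sequence number records the notifier state being validated
     * against.
     */
    seq = mmu_interval_read_begin(&prange->notifier);
    r = hmm_range_fault(&range);        /* fills range.hmm_pfns[] */

    /* Under the range lock, check for a racing CPU invalidation. */
    svm_range_lock(prange);
    if (mmu_interval_read_retry(&prange->notifier, seq))
        r = -EAGAIN;    /* pages went stale, caller must retry */
    /* otherwise the pages stay valid until svm_range_unlock(), so
     * GPU page tables can be updated safely (done in a later patch)
     */
    svm_range_unlock(prange);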
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 116 ++++++++++++++++++++++++++-
drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 18 +++++
2 files changed, 133 insertions(+), 1 deletion(-)
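
A note on the MMU notifier side: svm_range_cpu_invalidate_pagetables()
below is still a stub. As a rough sketch of the contract it has to
implement (assumption: the actual GPU unmap/evict work lands in later
patches of this series; what the mmu_interval_notifier API itself
requires is bumping the sequence under the same driver lock that the
mmu_interval_read_retry() path holds):

    static bool
    svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
                                        const struct mmu_notifier_range *range,
                                        unsigned long cur_seq)
    {
        struct svm_range *prange = container_of(mni, struct svm_range,
                                                notifier);

        svm_range_lock(prange);
        /* Publish the new sequence so that concurrent validations
         * fail mmu_interval_read_retry() and start over.
         */
        mmu_interval_set_seq(mni, cur_seq);
        /* Later patches unmap the range from GPUs and/or evict here. */
        svm_range_unlock(prange);

        return true;
    }

The svm_range_lock()/svm_range_unlock() helpers added to kfd_svm.h pair
prange->lock with memalloc_noreclaim_save/restore, so the validation
path and (eventually) this callback serialize on the same mutex.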
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 1d99f2f0cb31..9cdc030fe5c8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -28,6 +28,15 @@
#include "kfd_priv.h"
#include "kfd_svm.h"
+static bool
+svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+
+static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
+ .invalidate = svm_range_cpu_invalidate_pagetables,
+};
+
/**
* svm_range_unlink - unlink svm_range from lists and interval tree
* @prange: svm range structure to be removed
@@ -46,6 +55,18 @@ static void svm_range_unlink(struct svm_range *prange)
interval_tree_remove(&prange->it_node, &prange->svms->objects);
}
+static void
+svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
+{
+ pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
+ prange, prange->start, prange->last);
+
+ mmu_interval_notifier_insert_locked(&prange->notifier, mm,
+ prange->start << PAGE_SHIFT,
+ prange->npages << PAGE_SHIFT,
+ &svm_range_mn_ops);
+}
+
/**
* svm_range_add_to_svms - add svm range to svms
* @prange: svm range structure to be added
@@ -65,11 +86,24 @@ static void svm_range_add_to_svms(struct svm_range *prange)
interval_tree_insert(&prange->it_node, &prange->svms->objects);
}
+static void svm_range_remove_notifier(struct svm_range *prange)
+{
+ pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
+ prange->svms, prange,
+ prange->notifier.interval_tree.start >> PAGE_SHIFT,
+ prange->notifier.interval_tree.last >> PAGE_SHIFT);
+
+ if (prange->notifier.interval_tree.start != 0 &&
+ prange->notifier.interval_tree.last != 0)
+ mmu_interval_notifier_remove(&prange->notifier);
+}
+
static void svm_range_free(struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
+ mutex_destroy(&prange->lock);
kfree(prange);
}
@@ -102,6 +136,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
INIT_LIST_HEAD(&prange->update_list);
INIT_LIST_HEAD(&prange->remove_list);
INIT_LIST_HEAD(&prange->insert_list);
+ mutex_init(&prange->lock);
svm_range_set_default_attributes(&prange->preferred_loc,
&prange->prefetch_loc,
&prange->granularity, &prange->flags);
@@ -377,6 +412,65 @@ svm_range_split_head(struct svm_range *prange, struct svm_range *new,
return r;
}
+/*
+ * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
+ *
+ * To prevent concurrent destruction or change of range attributes, the
+ * svm_read_lock must be held. The caller must not hold the svm_write_lock
+ * because that would block concurrent evictions and lead to deadlocks. To
+ * serialize concurrent migrations or validations of the same range, the
+ * prange->migrate_mutex must be held.
+ *
+ * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
+ * eviction fence).
+ *
+ * The following sequence ensures race-free validation and GPU mapping:
+ *
+ * 1. Reserve page table (and SVM BO if range is in VRAM)
+ * 2. hmm_range_fault to get page addresses (if system memory)
+ * 3. DMA-map pages (if system memory)
+ * 4-a. Take notifier lock
+ * 4-b. Check that pages still valid (mmu_interval_read_retry)
+ * 4-c. Check that the range was not split or otherwise invalidated
+ * 4-d. Update GPU page table
+ * 4-e. Release notifier lock
+ * 5. Release page table (and SVM BO) reservation
+ */
+static int svm_range_validate_and_map(struct mm_struct *mm,
+ struct svm_range *prange,
+ uint32_t gpuidx, bool intr, bool wait)
+{
+ struct hmm_range *hmm_range;
+ int r = 0;
+
+ if (!prange->actual_loc) {
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+ prange->start << PAGE_SHIFT,
+ prange->npages, &hmm_range,
+ false, true);
+ if (r) {
+ pr_debug("failed %d to get svm range pages\n", r);
+ goto unreserve_out;
+ }
+ }
+
+ svm_range_lock(prange);
+ if (!prange->actual_loc) {
+ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ r = -EAGAIN;
+ goto unlock_out;
+ }
+ }
+
+ /* TODO: map to GPU */
+
+unlock_out:
+ svm_range_unlock(prange);
+unreserve_out:
+
+ return r;
+}
+
static struct svm_range *svm_range_clone(struct svm_range *old)
{
struct svm_range *new;
@@ -517,6 +611,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
return r;
}
+/**
+ * svm_range_cpu_invalidate_pagetables - interval notifier callback
+ * Stub for now; later patches unmap the range from GPUs here.
+ */
+static bool
+svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ return true;
+}
+
void svm_range_list_fini(struct kfd_process *p)
{
mutex_destroy(&p->svms.lock);
@@ -670,6 +776,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
/* Apply changes as a transaction */
list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
svm_range_add_to_svms(prange);
+ svm_range_add_notifier_locked(mm, prange);
}
list_for_each_entry(prange, &update_list, update_list) {
svm_range_apply_attrs(p, prange, nattr, attrs);
@@ -681,6 +788,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
prange->svms, prange, prange->start,
prange->last);
svm_range_unlink(prange);
+ svm_range_remove_notifier(prange);
svm_range_free(prange);
}
@@ -691,7 +799,13 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
* case because the rollback wouldn't be guaranteed to work either.
*/
list_for_each_entry(prange, &update_list, update_list) {
- /* TODO */
+ r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+ true, true);
+ if (r) {
+ pr_debug("failed %d to map 0x%lx to gpus\n", r,
+ prange->start);
+ break;
+ }
}
svm_range_debug_dump(svms);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index bdafbc950e93..f77a27a9fb27 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -45,11 +45,14 @@
* @remove_list:link list node used to add to remove list
* @insert_list:link list node used to add to insert list
* @npages: number of pages
+ * @lock: protect prange start, last, child_list, svm_bo_list
+ * @saved_flags: save/restore current PF_MEMALLOC flags
* @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
* @perferred_loc: perferred location, 0 for CPU, or GPU id
* @perfetch_loc: last prefetch location, 0 for CPU, or GPU id
* @actual_loc: the actual location, 0 for CPU, or GPU id
* @granularity:migration granularity, log2 num pages
+ * @notifier: mmu interval notifier registered for this range
* @bitmap_access: index bitmap of GPUs which can access the range
* @bitmap_aip: index bitmap of GPUs which can access the range in place
*
@@ -67,15 +70,30 @@ struct svm_range {
struct list_head remove_list;
struct list_head insert_list;
uint64_t npages;
+ struct mutex lock;
+ unsigned int saved_flags;
uint32_t flags;
uint32_t preferred_loc;
uint32_t prefetch_loc;
uint32_t actual_loc;
uint8_t granularity;
+ struct mmu_interval_notifier notifier;
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
};
+static inline void svm_range_lock(struct svm_range *prange)
+{
+ mutex_lock(&prange->lock);
+ prange->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline void svm_range_unlock(struct svm_range *prange)
+{
+ memalloc_noreclaim_restore(prange->saved_flags);
+ mutex_unlock(&prange->lock);
+}
+
int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
--
2.31.1