From: Felix Kuehling <Felix.Kuehling@amd.com>
To: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: alex.sierra@amd.com, Philip Yang <Philip.Yang@amd.com>
Subject: [PATCH 13/35] drm/amdkfd: map svm range to GPUs
Date: Wed, 6 Jan 2021 22:01:05 -0500
Message-ID: <20210107030127.20393-14-Felix.Kuehling@amd.com>
In-Reply-To: <20210107030127.20393-1-Felix.Kuehling@amd.com>
From: Philip Yang <Philip.Yang@amd.com>
Use amdgpu_vm_bo_update_mapping to update the GPU page tables, mapping or
unmapping the system memory pages of an svm range on the GPUs.
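
As a simplified sketch of the call shape (excerpted from this patch, with
error handling and VM reservation omitted), unmapping clears the PTEs of the
range and mapping fills them from the system page array gathered by HMM:

    /* Unmap: clear the PTEs covering the range; fence signals completion. */
    r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
                                    prange->it_node.start,
                                    prange->it_node.last,
                                    0 /* init_pte_value */, 0,
                                    NULL, NULL, &fence);

    /* Map: write PTEs from prange->pages_addr with ASIC-specific flags. */
    r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL,
                                    prange->mapping.start,
                                    prange->mapping.last,
                                    svm_range_get_pte_flags(adev, prange),
                                    0 /* offset */, NULL,
                                    prange->pages_addr, &vm->last_update);
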
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 232 ++++++++++++++++++++++++++-
drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +
2 files changed, 233 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 55500ec4972f..3c4a036609c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -534,6 +534,229 @@ svm_range_split_add_front(struct svm_range *prange, struct svm_range *new,
return 0;
}
+static uint64_t
+svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
+{
+ uint32_t flags = prange->flags;
+ uint32_t mapping_flags;
+ uint64_t pte_flags;
+
+ pte_flags = AMDGPU_PTE_VALID;
+ pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
+
+ if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
+ mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
+ if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+ if (flags & KFD_IOCTL_SVM_FLAG_COHERENT)
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+
+ /* TODO: add CHIP_ARCTURUS new flags for vram mapping */
+
+ pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
+
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, &prange->mapping, &pte_flags);
+
+ pr_debug("PTE flags 0x%llx\n", pte_flags);
+
+ return pte_flags;
+}
+
+static int
+svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct svm_range *prange, struct dma_fence **fence)
+{
+ uint64_t init_pte_value = 0;
+ uint64_t start;
+ uint64_t last;
+
+ start = prange->it_node.start;
+ last = prange->it_node.last;
+
+ pr_debug("svms 0x%p [0x%llx 0x%llx]\n", prange->svms, start, last);
+
+ return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
+ start, last, init_pte_value, 0,
+ NULL, NULL, fence);
+}
+
+static int
+svm_range_unmap_from_gpus(struct svm_range *prange)
+{
+ DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
+ struct kfd_process_device *pdd;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_device *adev;
+ struct kfd_process *p;
+ struct kfd_dev *dev;
+ uint32_t gpuidx;
+ int r = 0;
+
+ bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
+ MAX_GPU_INSTANCE);
+ p = container_of(prange->svms, struct kfd_process, svms);
+
+ for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
+ pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
+ r = kfd_process_device_from_gpuidx(p, gpuidx, &dev);
+ if (r) {
+ pr_debug("failed to find device idx %d\n", gpuidx);
+ return -EINVAL;
+ }
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd))
+ return -EINVAL;
+
+ adev = (struct amdgpu_device *)dev->kgd;
+
+ r = svm_range_unmap_from_gpu(adev, pdd->vm, prange, &fence);
+ if (r)
+ break;
+
+ if (fence) {
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ fence = NULL;
+ if (r)
+ break;
+ }
+
+ amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
+ p->pasid);
+ }
+
+ return r;
+}
+
+static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static int
+svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct svm_range *prange, bool reserve_vm,
+ struct dma_fence **fence)
+{
+ struct amdgpu_bo *root;
+ dma_addr_t *pages_addr;
+ uint64_t pte_flags;
+ int r = 0;
+
+ pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
+ prange->it_node.start, prange->it_node.last);
+
+ if (reserve_vm) {
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ r = amdgpu_bo_reserve(root, true);
+ if (r) {
+ pr_debug("failed %d to reserve root bo\n", r);
+ amdgpu_bo_unref(&root);
+ goto out;
+ }
+ r = amdgpu_vm_validate_pt_bos(adev, vm, svm_range_bo_validate,
+ NULL);
+ if (r) {
+ pr_debug("failed %d validate pt bos\n", r);
+ goto unreserve_out;
+ }
+ }
+
+ prange->mapping.start = prange->it_node.start;
+ prange->mapping.last = prange->it_node.last;
+ prange->mapping.offset = 0;
+ pte_flags = svm_range_get_pte_flags(adev, prange);
+ prange->mapping.flags = pte_flags;
+ pages_addr = prange->pages_addr;
+
+ r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL,
+ prange->mapping.start,
+ prange->mapping.last, pte_flags,
+ prange->mapping.offset, NULL,
+ pages_addr, &vm->last_update);
+ if (r) {
+ pr_debug("failed %d to map to gpu 0x%lx\n", r,
+ prange->it_node.start);
+ goto unreserve_out;
+ }
+
+
+ r = amdgpu_vm_update_pdes(adev, vm, false);
+ if (r) {
+ pr_debug("failed %d to update directories 0x%lx\n", r,
+ prange->it_node.start);
+ goto unreserve_out;
+ }
+
+ if (fence)
+ *fence = dma_fence_get(vm->last_update);
+
+unreserve_out:
+ if (reserve_vm) {
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ }
+
+out:
+ return r;
+}
+
+static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
+{
+ DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *adev;
+ struct kfd_process *p;
+ struct kfd_dev *dev;
+ struct dma_fence *fence = NULL;
+ uint32_t gpuidx;
+ int r = 0;
+
+ bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
+ MAX_GPU_INSTANCE);
+ p = container_of(prange->svms, struct kfd_process, svms);
+
+ for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
+ r = kfd_process_device_from_gpuidx(p, gpuidx, &dev);
+ if (r) {
+ pr_debug("failed to find device idx %d\n", gpuidx);
+ return -EINVAL;
+ }
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd))
+ return -EINVAL;
+ adev = (struct amdgpu_device *)dev->kgd;
+
+ r = svm_range_map_to_gpu(adev, pdd->vm, prange, reserve_vm,
+ &fence);
+ if (r)
+ break;
+
+ if (fence) {
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ fence = NULL;
+ if (r) {
+ pr_debug("failed %d to dma fence wait\n", r);
+ break;
+ }
+ }
+ }
+
+ return r;
+}
+
struct svm_range *svm_range_clone(struct svm_range *old)
{
struct svm_range *new;
@@ -750,6 +973,7 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, unsigned long start,
*/
list_for_each_entry_safe(prange, tmp, &update_list, update_list) {
list_del(&prange->list);
+ svm_range_unmap_from_gpus(prange);
mutex_lock(&svms->free_list_lock);
list_add_tail(&prange->remove_list, &svms->free_list);
mutex_unlock(&svms->free_list_lock);
@@ -991,8 +1215,14 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
}
r = svm_range_validate(mm, prange);
- if (r)
+ if (r) {
pr_debug("failed %d to validate svm range\n", r);
+ goto out_unlock;
+ }
+
+ r = svm_range_map_to_gpus(prange, true);
+ if (r)
+ pr_debug("failed %d to map svm range\n", r);
out_unlock:
if (r) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 4d394f72eefc..fb68b5ee54f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -42,6 +42,7 @@
* @update_list:link list node used to add to update_list
* @remove_list:link list node used to add to remove list
* @hmm_range: hmm range structure used by hmm_range_fault to get system pages
+ * @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
* @pages_addr: list of system memory physical page address
* @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
@@ -63,6 +64,7 @@ struct svm_range {
struct list_head update_list;
struct list_head remove_list;
struct hmm_range *hmm_range;
+ struct amdgpu_bo_va_mapping mapping;
uint64_t npages;
dma_addr_t *pages_addr;
uint32_t flags;
--
2.29.2