From: Felix Kuehling <Felix.Kuehling@amd.com>
To: dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Cc: Alex Sierra <alex.sierra@amd.com>, Philip Yang <Philip.Yang@amd.com>
Subject: [PATCH 26/44] drm/amdkfd: add svm_bo eviction mechanism support
Date: Mon, 22 Mar 2021 06:58:42 -0400
Message-ID: <20210322105900.14068-27-Felix.Kuehling@amd.com>
In-Reply-To: <20210322105900.14068-1-Felix.Kuehling@amd.com>

The svm_bo eviction mechanism is different from the one used for regular
BOs. Every SVM_BO created contains its own eviction fence and a work
item for the eviction process, and a single SVM_BO can be attached to
one or more pranges. Under VRAM pressure, TTM calls the enable_signaling
callback of each SVM_BO's eviction fence until enough VRAM space is
available. These TTM eviction calls are all synchronous, which
guarantees that each eviction has completed and its fence has signaled
before the call returns.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 186 +++++++++++++++++++++------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  13 +-
 2 files changed, 153 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index c791d91cb45d..3a7b842b362c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -34,6 +34,7 @@
 
 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
 
+static void svm_range_evict_svm_bo_worker(struct work_struct *work);
 static bool
 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 				    const struct mmu_notifier_range *range,
@@ -332,7 +333,15 @@ static void svm_range_bo_release(struct kref *kref)
 		spin_lock(&svm_bo->list_lock);
 	}
 	spin_unlock(&svm_bo->list_lock);
-
+	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
+		/* We're not in the eviction worker.
+		 * Signal the fence and synchronize with any
+		 * pending eviction work.
+		 */
+		dma_fence_signal(&svm_bo->eviction_fence->base);
+		cancel_work_sync(&svm_bo->eviction_work);
+	}
+	dma_fence_put(&svm_bo->eviction_fence->base);
 	amdgpu_bo_unref(&svm_bo->bo);
 	kfree(svm_bo);
 }
@@ -345,6 +354,61 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
 	kref_put(&svm_bo->kref, svm_range_bo_release);
 }
 
+static bool svm_range_validate_svm_bo(struct svm_range *prange)
+{
+	mutex_lock(&prange->lock);
+	if (!prange->svm_bo) {
+		mutex_unlock(&prange->lock);
+		return false;
+	}
+	if (prange->ttm_res) {
+		/* We still have a reference, all is well */
+		mutex_unlock(&prange->lock);
+		return true;
+	}
+	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
+		if (READ_ONCE(prange->svm_bo->evicting)) {
+			struct dma_fence *f;
+			struct svm_range_bo *svm_bo;
+			/* The BO is getting evicted,
+			 * we need to get a new one
+			 */
+			mutex_unlock(&prange->lock);
+			svm_bo = prange->svm_bo;
+			f = dma_fence_get(&svm_bo->eviction_fence->base);
+			svm_range_bo_unref(prange->svm_bo);
+			/* wait for the fence to avoid long spin-loop
+			 * at list_empty_careful
+			 */
+			dma_fence_wait(f, false);
+			dma_fence_put(f);
+		} else {
+			/* The BO was still around and we got
+			 * a new reference to it
+			 */
+			mutex_unlock(&prange->lock);
+			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
+				 prange->svms, prange->start, prange->last);
+
+			prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+			return true;
+		}
+
+	} else {
+		mutex_unlock(&prange->lock);
+	}
+
+	/* We need a new svm_bo. Spin-loop to wait for concurrent
+	 * svm_range_bo_release to finish removing this range from
+	 * its range list. After this, it is safe to reuse the
+	 * svm_bo pointer and svm_bo_list head.
+	 */
+	while (!list_empty_careful(&prange->svm_bo_list))
+		;
+
+	return false;
+}
+
 static struct svm_range_bo *svm_range_bo_new(void)
 {
 	struct svm_range_bo *svm_bo;
@@ -364,72 +428,56 @@ int
 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 			bool clear)
 {
-	struct amdkfd_process_info *process_info;
 	struct amdgpu_bo_param bp;
 	struct svm_range_bo *svm_bo;
 	struct amdgpu_bo_user *ubo;
 	struct amdgpu_bo *bo;
 	struct kfd_process *p;
+	struct mm_struct *mm;
 	int r;
 
-	pr_debug("[0x%lx 0x%lx]\n", prange->start, prange->last);
-	mutex_lock(&prange->lock);
-	if (prange->svm_bo) {
-		if (prange->ttm_res) {
-			/* We still have a reference, all is well */
-			mutex_unlock(&prange->lock);
-			return 0;
-		}
-		if (svm_bo_ref_unless_zero(prange->svm_bo)) {
-			/* The BO was still around and we got
-			 * a new reference to it
-			 */
-			mutex_unlock(&prange->lock);
-			pr_debug("reuse old bo [0x%lx 0x%lx]\n",
-				 prange->start, prange->last);
-
-			prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
-			return 0;
-		}
-
-		mutex_unlock(&prange->lock);
-
-		/* We need a new svm_bo. Spin-loop to wait for concurrent
-		 * svm_range_bo_release to finish removing this range from
-		 * its range list. After this, it is safe to reuse the
-		 * svm_bo pointer and svm_bo_list head.
-		 */
-		while (!list_empty_careful(&prange->svm_bo_list))
-			;
+	p = container_of(prange->svms, struct kfd_process, svms);
+	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
+		 prange->start, prange->last);
 
-	} else {
-		mutex_unlock(&prange->lock);
-	}
+	if (svm_range_validate_svm_bo(prange))
+		return 0;
 
 	svm_bo = svm_range_bo_new();
 	if (!svm_bo) {
 		pr_debug("failed to alloc svm bo\n");
 		return -ENOMEM;
 	}
-
+	mm = get_task_mm(p->lead_thread);
+	if (!mm) {
+		pr_debug("failed to get mm\n");
+		kfree(svm_bo);
+		return -ESRCH;
+	}
+	svm_bo->svms = prange->svms;
+	svm_bo->eviction_fence =
+		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
+					   mm,
+					   svm_bo);
+	mmput(mm);
+	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
+	svm_bo->evicting = 0;
 	memset(&bp, 0, sizeof(bp));
 	bp.size = prange->npages * PAGE_SIZE;
 	bp.byte_align = PAGE_SIZE;
 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
+	bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
 	bp.type = ttm_bo_type_device;
 	bp.resv = NULL;
 
 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 	if (r) {
 		pr_debug("failed %d to create bo\n", r);
-		kfree(svm_bo);
-		return r;
+		goto create_bo_failed;
 	}
 	bo = &ubo->bo;
-
-	p = container_of(prange->svms, struct kfd_process, svms);
 	r = amdgpu_bo_reserve(bo, true);
 	if (r) {
 		pr_debug("failed %d to reserve bo\n", r);
@@ -442,8 +490,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 		amdgpu_bo_unreserve(bo);
 		goto reserve_bo_failed;
 	}
-	process_info = p->kgd_process_info;
-	amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
+	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
 
 	amdgpu_bo_unreserve(bo);
 
@@ -459,8 +506,10 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 	return 0;
 
 reserve_bo_failed:
-	kfree(svm_bo);
 	amdgpu_bo_unref(&bo);
+create_bo_failed:
+	dma_fence_put(&svm_bo->eviction_fence->base);
+	kfree(svm_bo);
 	prange->ttm_res = NULL;
 
 	return r;
@@ -2267,6 +2316,59 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 	return r;
 }
 
+int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
+{
+	if (!fence)
+		return -EINVAL;
+
+	if (dma_fence_is_signaled(&fence->base))
+		return 0;
+
+	if (fence->svm_bo) {
+		WRITE_ONCE(fence->svm_bo->evicting, 1);
+		schedule_work(&fence->svm_bo->eviction_work);
+	}
+
+	return 0;
+}
+
+static void svm_range_evict_svm_bo_worker(struct work_struct *work)
+{
+	struct svm_range_bo *svm_bo;
+	struct svm_range *prange;
+	struct kfd_process *p;
+	struct mm_struct *mm;
+
+	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
+	if (!svm_bo_ref_unless_zero(svm_bo))
+		return; /* svm_bo was freed while eviction was pending */
+
+	/* svm_range_bo_release destroys this worker thread. So during
+	 * the lifetime of this thread, kfd_process and mm will be valid.
+	 */
+	p = container_of(svm_bo->svms, struct kfd_process, svms);
+	mm = p->mm;
+	if (!mm)
+		return;
+
+	mmap_read_lock(mm);
+	list_for_each_entry(prange, &svm_bo->range_list, svm_bo_list) {
+		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
+			 prange->start, prange->last);
+		mutex_lock(&prange->migrate_mutex);
+		svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
+		mutex_unlock(&prange->migrate_mutex);
+	}
+	mmap_read_unlock(mm);
+
+	dma_fence_signal(&svm_bo->eviction_fence->base);
+	/* This is the last reference to svm_bo, after svm_range_vram_node_free
+	 * has been called in svm_migrate_vram_to_ram
+	 */
+	WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
+	svm_range_bo_unref(svm_bo);
+}
+
 static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 3aa6f6b97481..f8e282ec9d8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -33,10 +33,14 @@
 #include "kfd_priv.h"
 
 struct svm_range_bo {
-	struct amdgpu_bo	*bo;
-	struct kref		kref;
-	struct list_head	range_list; /* all svm ranges shared this bo */
-	spinlock_t		list_lock;
+	struct amdgpu_bo	*bo;
+	struct kref		kref;
+	struct list_head	range_list; /* all svm ranges shared this bo */
+	spinlock_t		list_lock;
+	struct amdgpu_amdkfd_fence	*eviction_fence;
+	struct work_struct	eviction_work;
+	struct svm_range_list	*svms;
+	uint32_t		evicting;
 };
 
 enum svm_work_list_ops {
@@ -161,6 +165,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
 			struct list_head *deferred_update_list);
 int svm_range_restore_pages(struct amdgpu_device *adev,
 			    unsigned int pasid, uint64_t addr);
+int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
 void svm_range_add_list_work(struct svm_range_list *svms,
 			     struct svm_range *prange, struct mm_struct *mm,
 			     enum svm_work_list_ops op);
-- 
2.31.0
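How the worker added above gets triggered: svm_range_vram_node_new()
attaches the SVM BO's own eviction fence to the BO's reservation
object, so when TTM decides to evict the BO it ends up in that fence's
enable_signaling callback. The amdgpu side of this wiring lands in
patches 27 and 28 of this series ("drm/amdgpu: svm bo enable_signal
call condition" and "drm/amdgpu: add svm_bo eviction to enable_signal
cb"). The sketch below only illustrates that flow; the callback and
helper names are simplified and it is not code from this patch:

/* Illustrative sketch, not part of this patch: roughly how the
 * amdgpu_amdkfd_fence enable_signaling callback dispatches between
 * regular KFD BOs and SVM BOs once patches 27/28 are applied.
 */
static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);

	if (!fence)
		return false;

	if (dma_fence_is_signaled(f))
		return true;

	if (!fence->svm_bo) {
		/* regular KFD BO: evict the process's user queues */
		if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
			return true;
	} else {
		/* SVM BO: mark it evicting and kick the per-BO eviction
		 * worker added by this patch
		 */
		if (!svm_range_schedule_evict_svm_bo(fence))
			return true;
	}
	return false;
}

The worker then takes mmap_read_lock, migrates every attached prange
back to system memory under its migrate_mutex, and signals the fence,
which is what lets the synchronous TTM eviction described in the
commit message complete.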
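Both the validation path and the eviction worker rely on
svm_bo_ref_unless_zero() to safely take a reference against a
concurrent release. That helper is introduced earlier in the series
(patch 24, "drm/amdkfd: add svm_bo reference for eviction fence"); it
is essentially a NULL-safe kref_get_unless_zero() wrapper, sketched
here since it is not part of this diff:

/* Sketch of the helper used above (added earlier in the series):
 * only take a reference if the refcount has not already dropped to
 * zero, i.e. if svm_range_bo_release has not started running yet.
 */
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	return svm_bo && kref_get_unless_zero(&svm_bo->kref);
}

This is what makes the evicting-flag handshake safe:
svm_range_schedule_evict_svm_bo() sets the flag with WRITE_ONCE()
before scheduling the work, and svm_range_validate_svm_bo() only
checks READ_ONCE(evicting) after it has successfully taken a
reference, so the flag is never read on a freed svm_bo.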