* [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages
@ 2021-10-12 13:55 Philip Yang
  2021-10-12 13:55 ` [PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0 Philip Yang
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Philip Yang @ 2021-10-12 13:55 UTC
  To: amd-gfx; +Cc: Philip Yang

No functional change. Use pr_debug_ratelimited to avoid per-page debug
messages overflowing the dmesg buffer and console log.

Use dev_err to report errors from unexpected situations, to provide a
clue for debugging without enabling the dynamic debug log. Define
dev_fmt to prefix error messages with the function name.
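
For illustration, the logging pattern this change adopts (a sketch, not
part of the diff; the names come from the kfd_migrate.c hunks below):

    /* per-file prefix for dev_*() output, defined before any call site */
    #ifdef dev_fmt
    #undef dev_fmt
    #endif
    #define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__

    /* hot per-page path: ratelimited so large ranges cannot flood dmesg */
    pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
                         src[i] >> PAGE_SHIFT, page_to_pfn(spage));

    /* unexpected failure: always printed as "kfd_migrate: <func>: ..." */
    dev_err(adev->dev, "fail %d dma_map_page\n", r);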

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 34 +++++++++++++-----------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 17 +++++++-----
 2 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index f53e17a94ad8..b05c0579d0b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -20,7 +20,6 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include <linux/types.h>
 #include <linux/hmm.h>
 #include <linux/dma-direction.h>
@@ -34,6 +33,11 @@
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
 
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__
+
 static uint64_t
 svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
 {
@@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
 			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
 		}
 		if (r) {
-			pr_debug("failed %d to create gart mapping\n", r);
+			dev_err(adev->dev, "fail %d create gart mapping\n", r);
 			goto out_unlock;
 		}
 
 		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
 				       NULL, &next, false, true, false);
 		if (r) {
-			pr_debug("failed %d to copy memory\n", r);
+			dev_err(adev->dev, "fail %d to copy memory\n", r);
 			goto out_unlock;
 		}
 
@@ -285,7 +289,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	r = svm_range_vram_node_new(adev, prange, true);
 	if (r) {
-		pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
+		dev_err(adev->dev, "fail %d to alloc vram\n", r);
 		goto out;
 	}
 
@@ -305,7 +309,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 					      DMA_TO_DEVICE);
 			r = dma_mapping_error(dev, src[i]);
 			if (r) {
-				pr_debug("failed %d dma_map_page\n", r);
+				dev_err(adev->dev, "fail %d dma_map_page\n", r);
 				goto out_free_vram_pages;
 			}
 		} else {
@@ -325,8 +329,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			continue;
 		}
 
-		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
-			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
+		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
+				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));
 
 		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
@@ -405,8 +409,8 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	r = migrate_vma_setup(&migrate);
 	if (r) {
-		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
-			 r, prange->svms, prange->start, prange->last);
+		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+			prange->start, prange->last);
 		goto out_free;
 	}
 	if (migrate.cpages != npages) {
@@ -506,7 +510,7 @@ static void svm_migrate_page_free(struct page *page)
 	struct svm_range_bo *svm_bo = page->zone_device_data;
 
 	if (svm_bo) {
-		pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
+		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
 		svm_range_bo_unref(svm_bo);
 	}
 }
@@ -572,12 +576,12 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 		r = dma_mapping_error(dev, dst[i]);
 		if (r) {
-			pr_debug("failed %d dma_map_page\n", r);
+			dev_err(adev->dev, "fail %d dma_map_page\n", r);
 			goto out_oom;
 		}
 
-		pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
-			      dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
+		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
+				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
 		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
@@ -631,8 +635,8 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	r = migrate_vma_setup(&migrate);
 	if (r) {
-		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
-			 r, prange->svms, prange->start, prange->last);
+		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+			prange->start, prange->last);
 		goto out_free;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 425d55deca10..49c92713c2ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -33,6 +33,11 @@
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
 
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
+
 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
 
 /* Long enough to ensure no retry fault comes after svm range is restored and
@@ -158,17 +163,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 				   bo_adev->vm_manager.vram_base_offset -
 				   bo_adev->kfd.dev->pgmap.range.start;
 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
-			pr_debug("vram address detected: 0x%llx\n", addr[i]);
+			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
 			continue;
 		}
 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
 		r = dma_mapping_error(dev, addr[i]);
 		if (r) {
-			pr_debug("failed %d dma_map_page\n", r);
+			dev_err(dev, "failed %d dma_map_page\n", r);
 			return r;
 		}
-		pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
-			 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
+		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
+				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
 	}
 	return 0;
 }
@@ -217,7 +222,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
 	for (i = offset; i < offset + npages; i++) {
 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
 			continue;
-		pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
+		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
 		dma_addr[i] = 0;
 	}
@@ -1454,7 +1459,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		/* This should never happen. actual_loc gets set by
 		 * svm_migrate_ram_to_vram after allocating a BO.
 		 */
-		WARN(1, "VRAM BO missing during validation\n");
+		WARN_ONCE(1, "VRAM BO missing during validation\n");
 		return -EINVAL;
 	}
 
-- 
2.17.1



* [PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0
  2021-10-12 13:55 [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages Philip Yang
@ 2021-10-12 13:55 ` Philip Yang
  2021-10-12 21:00   ` [PATCH v3 " Philip Yang
  2021-10-12 13:55 ` [PATCH v2 3/3] drm/amdkfd: create unregister svm range not overlap with TTM range Philip Yang
  2021-10-12 22:49 ` [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages Felix Kuehling
  2 siblings, 1 reply; 8+ messages in thread
From: Philip Yang @ 2021-10-12 13:55 UTC
  To: amd-gfx; +Cc: Philip Yang

migrate_vma_setup may return cpages 0, meaning no pages can be
migrated; treat this as an error case and skip the remaining vma
migration steps.

Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
number of pages migrated successfully. The caller adds up all the
successfully migrated pages and updates prange->actual_loc only if the
total migrated page count is not 0.

This also removes the warning message "VRAM BO missing during
validation" if migration cpages is 0.
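
For illustration, the resulting caller pattern in
svm_migrate_ram_to_vram (a simplified sketch of the hunks below, with
the per-VMA loop elided):

    unsigned long cpages = 0;

    /* each call returns the pages moved for one VMA; 0 means none */
    cpages += svm_migrate_vma_to_vram(adev, prange, vma, addr, next);

    /* claim the new location only if at least one page actually moved */
    if (cpages) {
        prange->actual_loc = best_loc;
        return 0;
    }
    return -ENOMEM;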

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 92 +++++++++++++-----------
 1 file changed, 49 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b05c0579d0b9..dd0fd52d0158 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static unsigned long
 svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct vm_area_struct *vma, uint64_t start,
 			uint64_t end)
@@ -413,33 +413,38 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			prange->start, prange->last);
 		goto out_free;
 	}
-	if (migrate.cpages != npages) {
-		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
-			 migrate.cpages,
-			 npages);
-	}
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
-					     scratch);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
+
+	if (!migrate.cpages) {
+		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
+
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 	svm_range_free_dma_mappings(prange);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
-	}
 
-	return r;
+		return migrate.cpages;
+	}
+	return 0;
 }
 
 /**
@@ -460,7 +465,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	unsigned long addr, start, end;
 	struct vm_area_struct *vma;
 	struct amdgpu_device *adev;
-	int r = 0;
+	unsigned long cpages = 0;
 
 	if (prange->actual_loc == best_loc) {
 		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -491,18 +496,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 			break;
 
 		next = min(vma->vm_end, end);
-		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed to migrate\n");
-			break;
-		}
+		cpages += svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
 		addr = next;
 	}
 
-	if (!r)
+	if (cpages) {
 		prange->actual_loc = best_loc;
-
-	return r;
+		return 0;
+	}
+	return -ENOMEM;
 }
 
 static void svm_migrate_page_free(struct page *page)
@@ -603,7 +605,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static unsigned long
 svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
 {
@@ -640,31 +642,37 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out_free;
 	}
 
-	pr_debug("cpages %ld\n", migrate.cpages);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-					    scratch, npages);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
-	} else {
+	if (!migrate.cpages) {
 		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
 			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+				    scratch, npages);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_out,
 				   pdd->page_out + migrate.cpages);
+
+		return migrate.cpages;
 	}
-	return r;
+	return 0;
 }
 
 /**
@@ -684,7 +692,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	int r = 0;
+	unsigned long cpages = 0;
 
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -714,19 +722,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 			break;
 
 		next = min(vma->vm_end, end);
-		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed %d to migrate\n", r);
-			break;
-		}
+		cpages += svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
 		addr = next;
 	}
 
-	if (!r) {
+	if (cpages) {
 		svm_range_vram_node_free(prange);
 		prange->actual_loc = 0;
+
+		return 0;
 	}
-	return r;
+	return -ENOMEM;
 }
 
 /**
-- 
2.17.1



* [PATCH v2 3/3] drm/amdkfd: create unregister svm range not overlap with TTM range
  2021-10-12 13:55 [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages Philip Yang
  2021-10-12 13:55 ` [PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0 Philip Yang
@ 2021-10-12 13:55 ` Philip Yang
  2021-10-12 22:49 ` [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages Felix Kuehling
  2 siblings, 0 replies; 8+ messages in thread
From: Philip Yang @ 2021-10-12 13:55 UTC
  To: amd-gfx; +Cc: Philip Yang

When creating a new svm range to recover a retry fault, avoid
overlapping with ranges or userptr ranges managed by TTM; otherwise svm
migration would trigger TTM or userptr eviction, evicting user queues
unexpectedly.

Change the helper amdgpu_ttm_tt_affect_userptr to return the userptr
that falls inside the range. Add a helper svm_range_check_vm_userptr to
scan all userptrs of the vm and return the start and last addresses of
the overlapping userptr bo.
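
For illustration, the resulting clamping in
svm_range_create_unregistered_range (a sketch of the hunk below; bo_s
and bo_l are the overlapping mapping's start and last pages, filled in
by the check helpers):

    if (addr >= bo_s && addr <= bo_l)
        return NULL;      /* fault address itself is already mapped */
    if (addr < bo_s)
        last = bo_s - 1;  /* end the new range below the mapped bo */
    if (addr > bo_l)
        start = bo_l + 1; /* start the new range above the mapped bo */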

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c    | 92 +++++++++++++++++++++++--
 3 files changed, 91 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bd5dda8066fa..d784f8d3a834 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1220,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
  *
  */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end)
+				  unsigned long end, unsigned long *userptr)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned long size;
@@ -1235,6 +1235,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
+	if (userptr)
+		*userptr = gtt->userptr;
 	return true;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index ba5c864b8de1..91a087f9dc7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end);
+				  unsigned long end, unsigned long *userptr);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 49c92713c2ad..f987c73b535e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -50,7 +50,9 @@ static bool
 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 				    const struct mmu_notifier_range *range,
 				    unsigned long cur_seq);
-
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+		   uint64_t *bo_s, uint64_t *bo_l);
 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
 	.invalidate = svm_range_cpu_invalidate_pagetables,
 };
@@ -2308,6 +2310,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 	return -1;
 }
+
 static int
 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 				unsigned long *start, unsigned long *last)
@@ -2355,8 +2358,59 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 		  vma->vm_end >> PAGE_SHIFT, *last);
 
 	return 0;
+}
+
+static int
+svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
+			   uint64_t *bo_s, uint64_t *bo_l)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct interval_tree_node *node;
+	struct amdgpu_bo *bo = NULL;
+	unsigned long userptr;
+	uint32_t i;
+	int r;
+
+	for (i = 0; i < p->n_pdds; i++) {
+		struct amdgpu_vm *vm;
+
+		if (!p->pdds[i]->drm_priv)
+			continue;
+
+		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+		r = amdgpu_bo_reserve(vm->root.bo, false);
+		if (r)
+			return r;
 
+		/* Check userptr by searching entire vm->va interval tree */
+		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
+		while (node) {
+			mapping = container_of((struct rb_node *)node,
+					       struct amdgpu_bo_va_mapping, rb);
+			bo = mapping->bo_va->base.bo;
+
+			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+							 start << PAGE_SHIFT,
+							 last << PAGE_SHIFT,
+							 &userptr)) {
+				node = interval_tree_iter_next(node, 0, ~0ULL);
+				continue;
+			}
+
+			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
+				 start, last);
+			if (bo_s && bo_l) {
+				*bo_s = userptr >> PAGE_SHIFT;
+				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
+			}
+			amdgpu_bo_unreserve(vm->root.bo);
+			return -EADDRINUSE;
+		}
+		amdgpu_bo_unreserve(vm->root.bo);
+	}
+	return 0;
 }
+
 static struct
 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 						struct kfd_process *p,
@@ -2366,10 +2420,23 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 	struct svm_range *prange = NULL;
 	unsigned long start, last;
 	uint32_t gpuid, gpuidx;
+	uint64_t bo_s = 0;
+	uint64_t bo_l = 0;
 
 	if (svm_range_get_range_boundaries(p, addr, &start, &last))
 		return NULL;
 
+	if (svm_range_check_vm(p, start, last, &bo_s, &bo_l) != -EADDRINUSE)
+		svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
+
+	if (addr >= bo_s && addr <= bo_l)
+		return NULL;
+
+	if (addr < bo_s)
+		last = bo_s - 1;
+	if (addr > bo_l)
+		start = bo_l + 1;
+
 	prange = svm_range_new(&p->svms, start, last);
 	if (!prange) {
 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
@@ -2672,6 +2739,8 @@ int svm_range_list_init(struct kfd_process *p)
  * @p: current kfd_process
  * @start: range start address, in pages
  * @last: range last address, in pages
+ * @bo_s: mapping start address in pages if address range already mapped
+ * @bo_l: mapping last address in pages if address range already mapped
  *
  * The purpose is to avoid virtual address ranges already allocated by
  * kfd_ioctl_alloc_memory_of_gpu ioctl.
@@ -2686,8 +2755,11 @@ int svm_range_list_init(struct kfd_process *p)
  * a signal. Release all buffer reservations and return to user-space.
  */
 static int
-svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last)
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+		   uint64_t *bo_s, uint64_t *bo_l)
 {
+	struct amdgpu_bo_va_mapping *mapping;
+	struct interval_tree_node *node;
 	uint32_t i;
 	int r;
 
@@ -2701,8 +2773,17 @@ svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last)
 		r = amdgpu_bo_reserve(vm->root.bo, false);
 		if (r)
 			return r;
-		if (interval_tree_iter_first(&vm->va, start, last)) {
-			pr_debug("Range [0x%llx 0x%llx] already mapped\n", start, last);
+
+		node = interval_tree_iter_first(&vm->va, start, last);
+		if (node) {
+			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
+				 start, last);
+			mapping = container_of((struct rb_node *)node,
+					       struct amdgpu_bo_va_mapping, rb);
+			if (bo_s && bo_l) {
+				*bo_s = mapping->start;
+				*bo_l = mapping->last;
+			}
 			amdgpu_bo_unreserve(vm->root.bo);
 			return -EADDRINUSE;
 		}
@@ -2743,7 +2824,8 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
 		start = min(end, vma->vm_end);
 	} while (start < end);
 
-	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT);
+	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
+				  NULL);
 }
 
 /**
-- 
2.17.1



* [PATCH v3 2/3] drm/amdkfd: handle svm partial migration cpages 0
  2021-10-12 13:55 ` [PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0 Philip Yang
@ 2021-10-12 21:00   ` Philip Yang
  2021-10-12 22:12     ` Felix Kuehling
  0 siblings, 1 reply; 8+ messages in thread
From: Philip Yang @ 2021-10-12 21:00 UTC
  To: amd-gfx; +Cc: Philip Yang

migrate_vma_setup may return cpages 0, meaning no pages can be
migrated; treat this as an error case and skip the remaining vma
migration steps.

Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
number of pages migrated successfully or an error code. The caller adds
up all the successfully migrated pages and updates prange->actual_loc
only if the total migrated page count is not 0.

This also removes the warning message "VRAM BO missing during
validation" if migration cpages is 0.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 90 ++++++++++++++----------
 1 file changed, 51 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b05c0579d0b9..d37f20b17586 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static long
 svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct vm_area_struct *vma, uint64_t start,
 			uint64_t end)
@@ -413,32 +413,37 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			prange->start, prange->last);
 		goto out_free;
 	}
-	if (migrate.cpages != npages) {
-		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
-			 migrate.cpages,
-			 npages);
-	}
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
-					     scratch);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
+
+	if (!migrate.cpages) {
+		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
+
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 	svm_range_free_dma_mappings(prange);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
-	}
 
+		return migrate.cpages;
+	}
 	return r;
 }
 
@@ -460,7 +465,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	unsigned long addr, start, end;
 	struct vm_area_struct *vma;
 	struct amdgpu_device *adev;
-	int r = 0;
+	unsigned long cpages = 0;
+	long r;
 
 	if (prange->actual_loc == best_loc) {
 		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -492,17 +498,16 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 
 		next = min(vma->vm_end, end);
 		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed to migrate\n");
-			break;
-		}
+		if (r > 0)
+			cpages += r;
 		addr = next;
 	}
 
-	if (!r)
+	if (cpages) {
 		prange->actual_loc = best_loc;
-
-	return r;
+		return 0;
+	}
+	return -ENOMEM;
 }
 
 static void svm_migrate_page_free(struct page *page)
@@ -603,7 +608,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static long
 svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
 {
@@ -640,29 +645,35 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out_free;
 	}
 
-	pr_debug("cpages %ld\n", migrate.cpages);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-					    scratch, npages);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
-	} else {
+	if (!migrate.cpages) {
 		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
 			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+				    scratch, npages);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_out,
 				   pdd->page_out + migrate.cpages);
+
+		return migrate.cpages;
 	}
 	return r;
 }
@@ -684,7 +695,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	int r = 0;
+	unsigned long cpages = 0;
+	long r;
 
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -715,18 +727,18 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 
 		next = min(vma->vm_end, end);
 		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed %d to migrate\n", r);
-			break;
-		}
+		if (r > 0)
+			cpages += r;
 		addr = next;
 	}
 
-	if (!r) {
+	if (cpages) {
 		svm_range_vram_node_free(prange);
 		prange->actual_loc = 0;
+
+		return 0;
 	}
-	return r;
+	return -ENOMEM;
 }
 
 /**
-- 
2.17.1



* Re: [PATCH v3 2/3] drm/amdkfd: handle svm partial migration cpages 0
  2021-10-12 21:00   ` [PATCH v3 " Philip Yang
@ 2021-10-12 22:12     ` Felix Kuehling
  2021-10-12 22:39       ` [PATCH v4 " Philip Yang
  0 siblings, 1 reply; 8+ messages in thread
From: Felix Kuehling @ 2021-10-12 22:12 UTC
  To: Philip Yang, amd-gfx


On 2021-10-12 at 5:00 p.m., Philip Yang wrote:
> migrate_vma_setup may return cpages 0, meaning no pages can be
> migrated; treat this as an error case and skip the remaining vma
> migration steps.
>
> Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
> number of pages migrated successfully or an error code. The caller adds
> up all the successfully migrated pages and updates prange->actual_loc
> only if the total migrated page count is not 0.
>
> This also removes the warning message "VRAM BO missing during
> validation" if migration cpages is 0.
>
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 90 ++++++++++++++----------
>  1 file changed, 51 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index b05c0579d0b9..d37f20b17586 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  	return r;
>  }
>  
> -static int
> +static long
>  svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  			struct vm_area_struct *vma, uint64_t start,
>  			uint64_t end)
> @@ -413,32 +413,37 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  			prange->start, prange->last);
>  		goto out_free;
>  	}
> -	if (migrate.cpages != npages) {
> -		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
> -			 migrate.cpages,
> -			 npages);
> -	}
>  
> -	if (migrate.cpages) {
> -		r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
> -					     scratch);
> -		migrate_vma_pages(&migrate);
> -		svm_migrate_copy_done(adev, mfence);
> -		migrate_vma_finalize(&migrate);
> +	if (migrate.cpages != npages)
> +		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> +			 migrate.cpages, npages);
> +	else
> +		pr_debug("0x%lx pages migrated\n", migrate.cpages);
> +
> +	if (!migrate.cpages) {
> +		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
> +			 prange->start, prange->last);
> +		goto out_free;
>  	}
>  
> +	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
> +	migrate_vma_pages(&migrate);
> +	svm_migrate_copy_done(adev, mfence);
> +	migrate_vma_finalize(&migrate);
> +
>  	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
>  	svm_range_free_dma_mappings(prange);
>  
>  out_free:
>  	kvfree(buf);
>  out:
> -	if (!r) {
> +	if (!r && migrate.cpages) {
>  		pdd = svm_range_get_pdd_by_adev(prange, adev);
>  		if (pdd)
>  			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
> -	}
>  
> +		return migrate.cpages;
> +	}
>  	return r;
>  }
>  
> @@ -460,7 +465,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  	unsigned long addr, start, end;
>  	struct vm_area_struct *vma;
>  	struct amdgpu_device *adev;
> -	int r = 0;
> +	unsigned long cpages = 0;
> +	long r;
>  
>  	if (prange->actual_loc == best_loc) {
>  		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
> @@ -492,17 +498,16 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  
>  		next = min(vma->vm_end, end);
>  		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
> -		if (r) {
> -			pr_debug("failed to migrate\n");
> -			break;
> -		}
> +		if (r > 0)
> +			cpages += r;

I think you still want to break out of the loop here if r < 0,
potentially with a debug message.
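
Something like this (a sketch of the suggested handling, which v4 below
adopts):

    r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
    if (r < 0) {
        pr_debug("failed %ld to migrate\n", r);
        break;
    }
    cpages += r;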


>  		addr = next;
>  	}
>  
> -	if (!r)
> +	if (cpages) {
>  		prange->actual_loc = best_loc;
> -
> -	return r;
> +		return 0;
> +	}
> +	return -ENOMEM;
>  }
>  
>  static void svm_migrate_page_free(struct page *page)
> @@ -603,7 +608,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  	return r;
>  }
>  
> -static int
> +static long
>  svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
>  {
> @@ -640,29 +645,35 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  		goto out_free;
>  	}
>  
> -	pr_debug("cpages %ld\n", migrate.cpages);
> +	if (migrate.cpages != npages)
> +		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> +			 migrate.cpages, npages);
> +	else
> +		pr_debug("0x%lx pages migrated\n", migrate.cpages);
>  
> -	if (migrate.cpages) {
> -		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> -					    scratch, npages);
> -		migrate_vma_pages(&migrate);
> -		svm_migrate_copy_done(adev, mfence);
> -		migrate_vma_finalize(&migrate);
> -	} else {
> +	if (!migrate.cpages) {
>  		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
>  			 prange->start, prange->last);
> +		goto out_free;
>  	}
>  
> +	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> +				    scratch, npages);
> +	migrate_vma_pages(&migrate);
> +	svm_migrate_copy_done(adev, mfence);
> +	migrate_vma_finalize(&migrate);
>  	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
>  
>  out_free:
>  	kvfree(buf);
>  out:
> -	if (!r) {
> +	if (!r && migrate.cpages) {
>  		pdd = svm_range_get_pdd_by_adev(prange, adev);
>  		if (pdd)
>  			WRITE_ONCE(pdd->page_out,
>  				   pdd->page_out + migrate.cpages);
> +
> +		return migrate.cpages;
>  	}
>  	return r;
>  }
> @@ -684,7 +695,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
>  	unsigned long addr;
>  	unsigned long start;
>  	unsigned long end;
> -	int r = 0;
> +	unsigned long cpages = 0;
> +	long r;
>  
>  	if (!prange->actual_loc) {
>  		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
> @@ -715,18 +727,18 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
>  
>  		next = min(vma->vm_end, end);
>  		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
> -		if (r) {
> -			pr_debug("failed %d to migrate\n", r);
> -			break;
> -		}
> +		if (r > 0)
> +			cpages += r;

Same as above.

Regards,
  Felix

>  		addr = next;
>  	}
>  
> -	if (!r) {
> +	if (cpages) {
>  		svm_range_vram_node_free(prange);
>  		prange->actual_loc = 0;
> +
> +		return 0;
>  	}
> -	return r;
> +	return -ENOMEM;
>  }
>  
>  /**


* [PATCH v4 2/3] drm/amdkfd: handle svm partial migration cpages 0
  2021-10-12 22:12     ` Felix Kuehling
@ 2021-10-12 22:39       ` Philip Yang
  2021-10-12 22:49         ` Felix Kuehling
  0 siblings, 1 reply; 8+ messages in thread
From: Philip Yang @ 2021-10-12 22:39 UTC
  To: amd-gfx; +Cc: Philip Yang

migrate_vma_setup may return cpages 0, meaning no pages can be
migrated; treat this as an error case and skip the remaining vma
migration steps.

Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
number of pages migrated successfully or an error code. The caller adds
up all the successfully migrated pages and updates prange->actual_loc
only if the total migrated page count is not 0.

This also removes the warning message "VRAM BO missing during
validation" if migration cpages is 0.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 86 ++++++++++++++----------
 1 file changed, 52 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b05c0579d0b9..537e32f77eb5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static long
 svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct vm_area_struct *vma, uint64_t start,
 			uint64_t end)
@@ -413,32 +413,37 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			prange->start, prange->last);
 		goto out_free;
 	}
-	if (migrate.cpages != npages) {
-		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
-			 migrate.cpages,
-			 npages);
-	}
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
-					     scratch);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
+
+	if (!migrate.cpages) {
+		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
+
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 	svm_range_free_dma_mappings(prange);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
-	}
 
+		return migrate.cpages;
+	}
 	return r;
 }
 
@@ -460,7 +465,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	unsigned long addr, start, end;
 	struct vm_area_struct *vma;
 	struct amdgpu_device *adev;
-	int r = 0;
+	unsigned long cpages = 0;
+	long r = 0;
 
 	if (prange->actual_loc == best_loc) {
 		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -492,17 +498,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 
 		next = min(vma->vm_end, end);
 		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed to migrate\n");
+		if (r < 0) {
+			pr_debug("failed %ld to migrate\n", r);
 			break;
 		}
+		if (r > 0)
+			cpages += r;
 		addr = next;
 	}
 
-	if (!r)
+	if (cpages)
 		prange->actual_loc = best_loc;
 
-	return r;
+	return r < 0 ? r : 0;
 }
 
 static void svm_migrate_page_free(struct page *page)
@@ -603,7 +611,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	return r;
 }
 
-static int
+static long
 svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
 {
@@ -640,29 +648,35 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out_free;
 	}
 
-	pr_debug("cpages %ld\n", migrate.cpages);
+	if (migrate.cpages != npages)
+		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+			 migrate.cpages, npages);
+	else
+		pr_debug("0x%lx pages migrated\n", migrate.cpages);
 
-	if (migrate.cpages) {
-		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-					    scratch, npages);
-		migrate_vma_pages(&migrate);
-		svm_migrate_copy_done(adev, mfence);
-		migrate_vma_finalize(&migrate);
-	} else {
+	if (!migrate.cpages) {
 		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
 			 prange->start, prange->last);
+		goto out_free;
 	}
 
+	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+				    scratch, npages);
+	migrate_vma_pages(&migrate);
+	svm_migrate_copy_done(adev, mfence);
+	migrate_vma_finalize(&migrate);
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 
 out_free:
 	kvfree(buf);
 out:
-	if (!r) {
+	if (!r && migrate.cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
 			WRITE_ONCE(pdd->page_out,
 				   pdd->page_out + migrate.cpages);
+
+		return migrate.cpages;
 	}
 	return r;
 }
@@ -684,7 +698,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	int r = 0;
+	unsigned long cpages = 0;
+	long r = 0;
 
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -715,18 +730,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 
 		next = min(vma->vm_end, end);
 		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
-		if (r) {
-			pr_debug("failed %d to migrate\n", r);
+		if (r < 0) {
+			pr_debug("failed %ld to migrate\n", r);
 			break;
 		}
+		if (r > 0)
+			cpages += r;
 		addr = next;
 	}
 
-	if (!r) {
+	if (cpages) {
 		svm_range_vram_node_free(prange);
 		prange->actual_loc = 0;
 	}
-	return r;
+
+	return r < 0 ? r : 0;
 }
 
 /**
-- 
2.17.1



* Re: [PATCH v4 2/3] drm/amdkfd: handle svm partial migration cpages 0
  2021-10-12 22:39       ` [PATCH v4 " Philip Yang
@ 2021-10-12 22:49         ` Felix Kuehling
  0 siblings, 0 replies; 8+ messages in thread
From: Felix Kuehling @ 2021-10-12 22:49 UTC
  To: Philip Yang, amd-gfx


On 2021-10-12 at 6:39 p.m., Philip Yang wrote:
> migrate_vma_setup may return cpages 0, meaning no pages can be
> migrated; treat this as an error case and skip the remaining vma
> migration steps.
>
> Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
> number of pages migrated successfully or an error code. The caller adds
> up all the successfully migrated pages and updates prange->actual_loc
> only if the total migrated page count is not 0.
>
> This also removes the warning message "VRAM BO missing during
> validation" if migration cpages is 0.
>
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 86 ++++++++++++++----------
>  1 file changed, 52 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index b05c0579d0b9..537e32f77eb5 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  	return r;
>  }
>  
> -static int
> +static long
>  svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  			struct vm_area_struct *vma, uint64_t start,
>  			uint64_t end)
> @@ -413,32 +413,37 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  			prange->start, prange->last);
>  		goto out_free;
>  	}
> -	if (migrate.cpages != npages) {
> -		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
> -			 migrate.cpages,
> -			 npages);
> -	}
>  
> -	if (migrate.cpages) {
> -		r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
> -					     scratch);
> -		migrate_vma_pages(&migrate);
> -		svm_migrate_copy_done(adev, mfence);
> -		migrate_vma_finalize(&migrate);
> +	if (migrate.cpages != npages)
> +		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> +			 migrate.cpages, npages);
> +	else
> +		pr_debug("0x%lx pages migrated\n", migrate.cpages);
> +
> +	if (!migrate.cpages) {
> +		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
> +			 prange->start, prange->last);
> +		goto out_free;
>  	}
>  
> +	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
> +	migrate_vma_pages(&migrate);
> +	svm_migrate_copy_done(adev, mfence);
> +	migrate_vma_finalize(&migrate);
> +
>  	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
>  	svm_range_free_dma_mappings(prange);
>  
>  out_free:
>  	kvfree(buf);
>  out:
> -	if (!r) {
> +	if (!r && migrate.cpages) {
>  		pdd = svm_range_get_pdd_by_adev(prange, adev);
>  		if (pdd)
>  			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
> -	}
>  
> +		return migrate.cpages;
> +	}
>  	return r;
>  }
>  
> @@ -460,7 +465,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  	unsigned long addr, start, end;
>  	struct vm_area_struct *vma;
>  	struct amdgpu_device *adev;
> -	int r = 0;
> +	unsigned long cpages = 0;
> +	long r = 0;
>  
>  	if (prange->actual_loc == best_loc) {
>  		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
> @@ -492,17 +498,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  
>  		next = min(vma->vm_end, end);
>  		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
> -		if (r) {
> -			pr_debug("failed to migrate\n");
> +		if (r < 0) {
> +			pr_debug("failed %ld to migrate\n", r);
>  			break;
>  		}
> +		if (r > 0)

This should be "else if" or even just "else".
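
That is, since the r < 0 case already breaks out of the loop, r is known
to be >= 0 here and the check can be dropped entirely:

    if (r < 0) {
        pr_debug("failed %ld to migrate\n", r);
        break;
    }
    /* r >= 0: pages migrated in this VMA */
    cpages += r;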


> +			cpages += r;
>  		addr = next;
>  	}
>  
> -	if (!r)
> +	if (cpages)
>  		prange->actual_loc = best_loc;
>  
> -	return r;
> +	return r < 0 ? r : 0;
>  }
>  
>  static void svm_migrate_page_free(struct page *page)
> @@ -603,7 +611,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  	return r;
>  }
>  
> -static int
> +static long
>  svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
>  {
> @@ -640,29 +648,35 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  		goto out_free;
>  	}
>  
> -	pr_debug("cpages %ld\n", migrate.cpages);
> +	if (migrate.cpages != npages)
> +		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> +			 migrate.cpages, npages);
> +	else
> +		pr_debug("0x%lx pages migrated\n", migrate.cpages);
>  
> -	if (migrate.cpages) {
> -		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> -					    scratch, npages);
> -		migrate_vma_pages(&migrate);
> -		svm_migrate_copy_done(adev, mfence);
> -		migrate_vma_finalize(&migrate);
> -	} else {
> +	if (!migrate.cpages) {
>  		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
>  			 prange->start, prange->last);
> +		goto out_free;
>  	}
>  
> +	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> +				    scratch, npages);
> +	migrate_vma_pages(&migrate);
> +	svm_migrate_copy_done(adev, mfence);
> +	migrate_vma_finalize(&migrate);
>  	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
>  
>  out_free:
>  	kvfree(buf);
>  out:
> -	if (!r) {
> +	if (!r && migrate.cpages) {
>  		pdd = svm_range_get_pdd_by_adev(prange, adev);
>  		if (pdd)
>  			WRITE_ONCE(pdd->page_out,
>  				   pdd->page_out + migrate.cpages);
> +
> +		return migrate.cpages;
>  	}
>  	return r;
>  }
> @@ -684,7 +698,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
>  	unsigned long addr;
>  	unsigned long start;
>  	unsigned long end;
> -	int r = 0;
> +	unsigned long cpages = 0;
> +	long r = 0;
>  
>  	if (!prange->actual_loc) {
>  		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
> @@ -715,18 +730,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
>  
>  		next = min(vma->vm_end, end);
>  		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
> -		if (r) {
> -			pr_debug("failed %d to migrate\n", r);
> +		if (r < 0) {
> +			pr_debug("failed %ld to migrate\n", r);
>  			break;
>  		}
> +		if (r > 0)

Same as above.

With that fixed, the patch is

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>


> +			cpages += r;
>  		addr = next;
>  	}
>  
> -	if (!r) {
> +	if (cpages) {
>  		svm_range_vram_node_free(prange);
>  		prange->actual_loc = 0;
>  	}
> -	return r;
> +
> +	return r < 0 ? r : 0;
>  }
>  
>  /**


* Re: [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages
  2021-10-12 13:55 [PATCH v2 1/3] drm/amdkfd: ratelimited svm debug messages Philip Yang
  2021-10-12 13:55 ` [PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0 Philip Yang
  2021-10-12 13:55 ` [PATCH v2 3/3] drm/amdkfd: create unregister svm range not overlap with TTM range Philip Yang
@ 2021-10-12 22:49 ` Felix Kuehling
  2 siblings, 0 replies; 8+ messages in thread
From: Felix Kuehling @ 2021-10-12 22:49 UTC
  To: Philip Yang, amd-gfx

On 2021-10-12 at 9:55 a.m., Philip Yang wrote:
> No functional change. Use pr_debug_ratelimited to avoid per-page debug
> messages overflowing the dmesg buffer and console log.
>
> Use dev_err to report errors from unexpected situations, to provide a
> clue for debugging without enabling the dynamic debug log. Define
> dev_fmt to prefix error messages with the function name.
>
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>


> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 34 +++++++++++++-----------
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 17 +++++++-----
>  2 files changed, 30 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index f53e17a94ad8..b05c0579d0b9 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -20,7 +20,6 @@
>   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>   * OTHER DEALINGS IN THE SOFTWARE.
>   */
> -
>  #include <linux/types.h>
>  #include <linux/hmm.h>
>  #include <linux/dma-direction.h>
> @@ -34,6 +33,11 @@
>  #include "kfd_svm.h"
>  #include "kfd_migrate.h"
>  
> +#ifdef dev_fmt
> +#undef dev_fmt
> +#endif
> +#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__
> +
>  static uint64_t
>  svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
>  {
> @@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
>  			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
>  		}
>  		if (r) {
> -			pr_debug("failed %d to create gart mapping\n", r);
> +			dev_err(adev->dev, "fail %d create gart mapping\n", r);
>  			goto out_unlock;
>  		}
>  
>  		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
>  				       NULL, &next, false, true, false);
>  		if (r) {
> -			pr_debug("failed %d to copy memory\n", r);
> +			dev_err(adev->dev, "fail %d to copy memory\n", r);
>  			goto out_unlock;
>  		}
>  
> @@ -285,7 +289,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  
>  	r = svm_range_vram_node_new(adev, prange, true);
>  	if (r) {
> -		pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
> +		dev_err(adev->dev, "fail %d to alloc vram\n", r);
>  		goto out;
>  	}
>  
> @@ -305,7 +309,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  					      DMA_TO_DEVICE);
>  			r = dma_mapping_error(dev, src[i]);
>  			if (r) {
> -				pr_debug("failed %d dma_map_page\n", r);
> +				dev_err(adev->dev, "fail %d dma_map_page\n", r);
>  				goto out_free_vram_pages;
>  			}
>  		} else {
> @@ -325,8 +329,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  			continue;
>  		}
>  
> -		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
> -			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
> +		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
> +				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));
>  
>  		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
>  			r = svm_migrate_copy_memory_gart(adev, src + i - j,
> @@ -405,8 +409,8 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>  
>  	r = migrate_vma_setup(&migrate);
>  	if (r) {
> -		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
> -			 r, prange->svms, prange->start, prange->last);
> +		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
> +			prange->start, prange->last);
>  		goto out_free;
>  	}
>  	if (migrate.cpages != npages) {
> @@ -506,7 +510,7 @@ static void svm_migrate_page_free(struct page *page)
>  	struct svm_range_bo *svm_bo = page->zone_device_data;
>  
>  	if (svm_bo) {
> -		pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
> +		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
>  		svm_range_bo_unref(svm_bo);
>  	}
>  }
> @@ -572,12 +576,12 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
>  		r = dma_mapping_error(dev, dst[i]);
>  		if (r) {
> -			pr_debug("failed %d dma_map_page\n", r);
> +			dev_err(adev->dev, "fail %d dma_map_page\n", r);
>  			goto out_oom;
>  		}
>  
> -		pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
> -			      dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
> +		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
> +				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
>  
>  		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
>  		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
> @@ -631,8 +635,8 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>  
>  	r = migrate_vma_setup(&migrate);
>  	if (r) {
> -		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
> -			 r, prange->svms, prange->start, prange->last);
> +		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
> +			prange->start, prange->last);
>  		goto out_free;
>  	}
>  
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 425d55deca10..49c92713c2ad 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -33,6 +33,11 @@
>  #include "kfd_svm.h"
>  #include "kfd_migrate.h"
>  
> +#ifdef dev_fmt
> +#undef dev_fmt
> +#endif
> +#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
> +
>  #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
>  
>  /* Long enough to ensure no retry fault comes after svm range is restored and
> @@ -158,17 +163,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
>  				   bo_adev->vm_manager.vram_base_offset -
>  				   bo_adev->kfd.dev->pgmap.range.start;
>  			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
> -			pr_debug("vram address detected: 0x%llx\n", addr[i]);
> +			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
>  			continue;
>  		}
>  		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
>  		r = dma_mapping_error(dev, addr[i]);
>  		if (r) {
> -			pr_debug("failed %d dma_map_page\n", r);
> +			dev_err(dev, "failed %d dma_map_page\n", r);
>  			return r;
>  		}
> -		pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
> -			 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
> +		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
> +				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
>  	}
>  	return 0;
>  }
> @@ -217,7 +222,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
>  	for (i = offset; i < offset + npages; i++) {
>  		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
>  			continue;
> -		pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
> +		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
>  		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
>  		dma_addr[i] = 0;
>  	}
> @@ -1454,7 +1459,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>  		/* This should never happen. actual_loc gets set by
>  		 * svm_migrate_ram_to_vram after allocating a BO.
>  		 */
> -		WARN(1, "VRAM BO missing during validation\n");
> +		WARN_ONCE(1, "VRAM BO missing during validation\n");
>  		return -EINVAL;
>  	}
>  

