From: Felix Kuehling <felix.kuehling@gmail.com>
To: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: Alex Sierra <alex.sierra@amd.com>
Subject: [PATCH 09/10] drm/amdkfd: partially actual_loc removed
Date: Thu, 27 May 2021 16:56:05 -0400	[thread overview]
Message-ID: <20210527205606.2660-9-Felix.Kuehling@amd.com> (raw)
In-Reply-To: <20210527205606.2660-1-Felix.Kuehling@amd.com>

From: Alex Sierra <alex.sierra@amd.com>

actual_loc should no longer be used, because pranges can have pages in
mixed locations (VRAM & SYSRAM) at the same time. Drop the checks that
relied on it in the migration, validation and prefetch paths.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
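Notes: the sketch below is illustration only, not part of this patch,
and all names in it are hypothetical. Once pages of a range can migrate
individually, placement becomes a per-page property, which is why a
single per-range actual_loc value can be wrong:

  #include <stdbool.h>
  #include <stddef.h>

  enum page_loc { LOC_SYSRAM, LOC_VRAM };

  /* Hypothetical per-page placement array standing in for the real
   * per-page state (e.g. pfn flags). A range only has a meaningful
   * single location when every page agrees; a mixed range is neither
   * "in VRAM" nor "in SYSRAM" as a whole. */
  static bool range_entirely_at(const enum page_loc *loc, size_t npages,
                                enum page_loc where)
  {
          for (size_t i = 0; i < npages; i++)
                  if (loc[i] != where)
                          return false;
          return true;
  }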
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 12 +---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 71 ++++++++++--------------
 2 files changed, 29 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index f71f8d7e2b72..acb9f64577a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -501,12 +501,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	struct amdgpu_device *adev;
 	int r = 0;
 
-	if (prange->actual_loc == best_loc) {
-		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
-			 prange->svms, prange->start, prange->last, best_loc);
-		return 0;
-	}
-
 	adev = svm_range_get_adev_by_id(prange, best_loc);
 	if (!adev) {
 		pr_debug("failed to get device by id 0x%x\n", best_loc);
@@ -791,11 +785,7 @@ int
 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 		    struct mm_struct *mm)
 {
-	if  (!prange->actual_loc)
-		return svm_migrate_ram_to_vram(prange, best_loc, mm);
-	else
-		return svm_migrate_vram_to_vram(prange, best_loc, mm);
-
+	return svm_migrate_ram_to_vram(prange, best_loc, mm);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7b50395ec377..1e15a6170635 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1420,42 +1420,38 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 
 	svm_range_reserve_bos(&ctx);
 
-	if (!prange->actual_loc) {
-		p = container_of(prange->svms, struct kfd_process, svms);
-		owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
-							MAX_GPU_INSTANCE));
-		for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
-			if (kfd_svm_page_owner(p, idx) != owner) {
-				owner = NULL;
-				break;
-			}
-		}
-		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
-					       prange->start << PAGE_SHIFT,
-					       prange->npages, &hmm_range,
-					       false, true, owner);
-		if (r) {
-			pr_debug("failed %d to get svm range pages\n", r);
-			goto unreserve_out;
-		}
-
-		r = svm_range_dma_map(prange, ctx.bitmap,
-				      hmm_range->hmm_pfns);
-		if (r) {
-			pr_debug("failed %d to dma map range\n", r);
-			goto unreserve_out;
+	p = container_of(prange->svms, struct kfd_process, svms);
+	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
+						MAX_GPU_INSTANCE));
+	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
+		if (kfd_svm_page_owner(p, idx) != owner) {
+			owner = NULL;
+			break;
 		}
+	}
+	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+				       prange->start << PAGE_SHIFT,
+				       prange->npages, &hmm_range,
+				       false, true, owner);
+	if (r) {
+		pr_debug("failed %d to get svm range pages\n", r);
+		goto unreserve_out;
+	}
 
-		prange->validated_once = true;
+	r = svm_range_dma_map(prange, ctx.bitmap,
+			      hmm_range->hmm_pfns);
+	if (r) {
+		pr_debug("failed %d to dma map range\n", r);
+		goto unreserve_out;
 	}
 
+	prange->validated_once = true;
+
 	svm_range_lock(prange);
-	if (!prange->actual_loc) {
-		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
-			pr_debug("hmm update the range, need validate again\n");
-			r = -EAGAIN;
-			goto unlock_out;
-		}
+	if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+		pr_debug("hmm update the range, need validate again\n");
+		r = -EAGAIN;
+		goto unlock_out;
 	}
 	if (!list_empty(&prange->child_list)) {
 		pr_debug("range split by unmap in parallel, validate again\n");
@@ -2740,20 +2736,9 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 	*migrated = false;
 	best_loc = svm_range_best_prefetch_location(prange);
 
-	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
-	    best_loc == prange->actual_loc)
+	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
 		return 0;
 
-	/*
-	 * Prefetch to GPU without host access flag, set actual_loc to gpu, then
-	 * validate on gpu and map to gpus will be handled afterwards.
-	 */
-	if (best_loc && !prange->actual_loc &&
-	    !(prange->flags & KFD_IOCTL_SVM_FLAG_HOST_ACCESS)) {
-		prange->actual_loc = best_loc;
-		return 0;
-	}
-
 	if (!best_loc) {
 		r = svm_migrate_vram_to_ram(prange, mm);
 		*migrated = !r;
-- 
2.31.1
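
A standalone sketch of the common-owner selection that now runs
unconditionally in svm_range_validate_and_map() (illustration only;
get_owner() is a hypothetical stand-in for kfd_svm_page_owner(), and the
index array stands in for the ctx.bitmap walk):

  #include <stddef.h>

  /* Take the owner of the first GPU in the set; if any other GPU in
   * the set reports a different owner, fall back to NULL to signal
   * that there is no common owner for the page walk. */
  static void *common_owner(void *(*get_owner)(int idx),
                            const int *gpu_idx, size_t ngpus)
  {
          void *owner;
          size_t i;

          if (!ngpus)
                  return NULL;

          owner = get_owner(gpu_idx[0]);
          for (i = 1; i < ngpus; i++)
                  if (get_owner(gpu_idx[i]) != owner)
                          return NULL;
          return owner;
  }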


Thread overview: 22+ messages
2021-05-27 20:55 [PATCH 01/10] drm/amdkfd: device pgmap owner at the svm migrate init Felix Kuehling
2021-05-27 20:55 ` [PATCH 02/10] drm/amdkfd: add owner ref param to get hmm pages Felix Kuehling
2021-05-27 20:55 ` [PATCH 03/10] drm/amdkfd: set owner ref to svm range prefault Felix Kuehling
2021-05-27 20:56 ` [PATCH 04/10] drm/amdgpu: get owner ref in validate and map Felix Kuehling
2021-05-27 20:56 ` [PATCH 05/10] drm/amdkfd: classify and map mixed svm range pages in GPU Felix Kuehling
2021-05-27 20:56 ` [PATCH 06/10] drm/amdkfd: skip invalid pages during migrations Felix Kuehling
2021-05-27 20:56 ` [PATCH 07/10] drm/amdkfd: skip migration for pages already in VRAM Felix Kuehling
2021-05-27 20:56 ` [PATCH 08/10] drm/amdkfd: add invalid pages debug at vram migration Felix Kuehling
2021-05-27 20:56 ` [PATCH 09/10] drm/amdkfd: partially actual_loc removed Felix Kuehling [this message]
2021-05-27 20:56 ` [PATCH 10/10] drm/amdkfd: protect svm_bo ref in case prange has forked Felix Kuehling
2021-06-21 16:04 [PATCH 01/10] drm/amdkfd: device pgmap owner at the svm migrate init Alex Sierra
2021-06-21 16:04 ` [PATCH 09/10] drm/amdkfd: partially actual_loc removed Alex Sierra
2021-06-21 21:24   ` Felix Kuehling

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
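
  For example, one way to read the saved mbox directly (assuming the
  mutt mail client is available):

    mutt -f /path/to/mbox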

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210527205606.2660-9-Felix.Kuehling@amd.com \
    --to=felix.kuehling@gmail.com \
    --cc=alex.sierra@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.