From: "Christian König" <ckoenig.leichtzumerken@gmail.com>
To: matthew.auld@intel.com, thomas_os@shipmail.org,
	dri-devel@lists.freedesktop.org
Subject: [PATCH 05/10] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram v2
Date: Wed,  2 Jun 2021 12:09:09 +0200	[thread overview]
Message-ID: <20210602100914.46246-5-christian.koenig@amd.com>
In-Reply-To: <20210602100914.46246-1-christian.koenig@amd.com>

Access to the mm_node is now forbidden. So instead of hand-wiring the
iteration, use the cursor functionality.
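
For reference, the amdgpu_res_cursor helpers used below walk the
(possibly non-contiguous) blocks backing a resource. A minimal sketch
of the pattern, where process() is a hypothetical consumer and not
part of the API:

	struct amdgpu_res_cursor cursor;

	/* Start a walk of "size" bytes at byte offset "start". */
	amdgpu_res_first(res, start, size, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe the current
		 * contiguous block in bytes. */
		process(cursor.start, cursor.size);
		/* Advance, stepping to the next block when needed. */
		amdgpu_res_next(&cursor, cursor.size);
	}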

v2: fix handling as pointed out by Philip.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 68 ++++--------------------
 1 file changed, 10 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..5ce8fa2ddab0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
 	return r;
 }
 
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
-			       struct drm_mm_node **mm_node, uint64_t *offset)
-{
-	struct drm_mm_node *node = *mm_node;
-	uint64_t pos = *offset;
-
-	if (node->start == AMDGPU_BO_INVALID_OFFSET) {
-		pr_debug("drm node is not validated\n");
-		return 0;
-	}
-
-	pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
-		 node->size);
-
-	if (pos >= node->size) {
-		do  {
-			pos -= node->size;
-			node++;
-		} while (pos >= node->size);
-
-		*mm_node = node;
-		*offset = pos;
-	}
-
-	return (node->start + pos) << PAGE_SHIFT;
-}
-
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
 {
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 {
 	uint64_t npages = migrate->cpages;
 	struct device *dev = adev->dev;
-	struct drm_mm_node *node;
+	struct amdgpu_res_cursor cursor;
 	dma_addr_t *src;
 	uint64_t *dst;
-	uint64_t vram_addr;
-	uint64_t offset;
 	uint64_t i, j;
 	int r;
 
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out;
 	}
 
-	node = prange->ttm_res->mm_node;
-	offset = prange->offset;
-	vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
-	if (!vram_addr) {
-		WARN_ONCE(1, "vram node address is 0\n");
-		r = -ENOMEM;
-		goto out;
-	}
-
+	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+			 npages << PAGE_SHIFT, &cursor);
 	for (i = j = 0; i < npages; i++) {
 		struct page *spage;
 
-		dst[i] = vram_addr + (j << PAGE_SHIFT);
+		dst[i] = cursor.start + (j << PAGE_SHIFT);
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 						mfence);
 				if (r)
 					goto out_free_vram_pages;
-				offset += j;
-				vram_addr = (node->start + offset) << PAGE_SHIFT;
+				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
 				j = 0;
 			} else {
-				offset++;
-				vram_addr += PAGE_SIZE;
-			}
-			if (offset >= node->size) {
-				node++;
-				pr_debug("next node size 0x%llx\n", node->size);
-				vram_addr = node->start << PAGE_SHIFT;
-				offset = 0;
+				amdgpu_res_next(&cursor, PAGE_SIZE);
 			}
 			continue;
 		}
@@ -373,19 +329,15 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
 			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
 
-		if (j + offset >= node->size - 1 && i < npages - 1) {
+		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
 							 dst + i - j, j + 1,
 							 FROM_RAM_TO_VRAM,
 							 mfence);
 			if (r)
 				goto out_free_vram_pages;
-
-			node++;
-			pr_debug("next node size 0x%llx\n", node->size);
-			vram_addr = node->start << PAGE_SHIFT;
-			offset = 0;
-			j = 0;
+			amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
+			j = 0;
 		} else {
 			j++;
 		}
-- 
2.25.1


Thread overview: 43+ messages
2021-06-02 10:09 [PATCH 01/10] drm/ttm: allocate resource object instead of embedding it v2 Christian König
2021-06-02 10:09 ` [PATCH 02/10] drm/ttm: flip over the range manager to self allocated nodes Christian König
2021-06-02 11:44   ` Thomas Hellström (Intel)
2021-06-02 12:11     ` Christian König
2021-06-02 12:33       ` Thomas Hellström (Intel)
2021-06-02 13:07         ` Christian König
2021-06-02 14:13           ` Thomas Hellström (Intel)
2021-06-02 14:17             ` Christian König
2021-06-02 15:28               ` Thomas Hellström (Intel)
2021-06-02 18:41                 ` Christian König
2021-06-02 18:52                   ` Thomas Hellström (Intel)
2021-06-02 18:53                     ` Christian König
2021-06-02 10:09 ` [PATCH 03/10] drm/ttm: flip over the sys " Christian König
2021-06-03  7:51   ` Matthew Auld
2021-06-02 10:09 ` [PATCH 04/10] drm/amdgpu: revert "drm/amdgpu: stop allocating dummy GTT nodes" Christian König
2021-06-02 10:09 ` Christian König [this message]
2021-06-03  9:44   ` [PATCH 05/10] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram v2 Matthew Auld
2021-06-02 10:09 ` [PATCH 06/10] drm/amdgpu: switch the GTT backend to self alloc Christian König
2021-06-02 10:09 ` [PATCH 07/10] drm/amdgpu: switch the VRAM " Christian König
2021-06-02 10:09 ` [PATCH 08/10] drm/nouveau: switch the TTM backends " Christian König
2021-06-02 10:09 ` [PATCH 09/10] drm/vmwgfx: " Christian König
2021-06-02 10:09 ` [PATCH 10/10] drm/ttm: flip the switch for driver allocated resources v2 Christian König
2021-06-07 10:15   ` Thomas Hellström (Intel)
2021-06-07 10:37     ` Christian König
2021-06-07 10:44       ` Thomas Hellström (Intel)
2021-06-03  8:45 ` [PATCH 01/10] drm/ttm: allocate resource object instead of embedding it v2 Matthew Auld
2021-06-04 11:54   ` Christian König
2021-06-04  9:33 ` Thomas Hellström (Intel)
2021-06-07 16:40 ` Thomas Hellström (Intel)
2021-06-07 17:06   ` Thomas Hellström (Intel)
2021-06-07 17:54     ` Christian König
2021-06-07 17:58       ` Thomas Hellström (Intel)
2021-06-07 17:59         ` Christian König
2021-06-08  5:29           ` Thomas Hellström (Intel)
2021-06-08  7:14             ` Christian König
2021-06-08  7:17               ` Thomas Hellström (Intel)
2021-06-08  7:21                 ` Christian König
2021-06-08  9:38                   ` Das, Nirmoy
2021-06-08  9:40                     ` Das, Nirmoy
2021-06-08  9:42                       ` Christian König
2021-06-08  9:48                         ` Das, Nirmoy
2021-06-07 17:10   ` Christian König
2021-06-08  6:55 ` Thomas Hellström
