* [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram
@ 2021-05-21 13:52 Christian König
2021-05-21 19:28 ` philip yang
0 siblings, 1 reply; 4+ messages in thread
From: Christian König @ 2021-05-21 13:52 UTC (permalink / raw)
To: yangp, felix.kuehling; +Cc: amd-gfx
Access to the mm_node is now forbidden. So instead of hand wiring that
use the cursor functionality.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 76 +++---------------------
1 file changed, 9 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..cb28d1e660af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
return r;
}
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
- struct drm_mm_node **mm_node, uint64_t *offset)
-{
- struct drm_mm_node *node = *mm_node;
- uint64_t pos = *offset;
-
- if (node->start == AMDGPU_BO_INVALID_OFFSET) {
- pr_debug("drm node is not validated\n");
- return 0;
- }
-
- pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
- node->size);
-
- if (pos >= node->size) {
- do {
- pos -= node->size;
- node++;
- } while (pos >= node->size);
-
- *mm_node = node;
- *offset = pos;
- }
-
- return (node->start + pos) << PAGE_SHIFT;
-}
-
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
{
uint64_t npages = migrate->cpages;
struct device *dev = adev->dev;
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
dma_addr_t *src;
uint64_t *dst;
- uint64_t vram_addr;
- uint64_t offset;
uint64_t i, j;
int r;
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
goto out;
}
- node = prange->ttm_res->mm_node;
- offset = prange->offset;
- vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
- if (!vram_addr) {
- WARN_ONCE(1, "vram node address is 0\n");
- r = -ENOMEM;
- goto out;
- }
-
+ amdgpu_res_first(prange->ttm_res, prange->offset, npages << PAGE_SHIFT,
+ &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
- dst[i] = vram_addr + (j << PAGE_SHIFT);
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
svm_migrate_get_vram_page(prange, migrate->dst[i]);
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
mfence);
if (r)
goto out_free_vram_pages;
- offset += j;
- vram_addr = (node->start + offset) << PAGE_SHIFT;
+ amdgpu_res_next(&cursor, j << PAGE_SHIFT);
j = 0;
} else {
- offset++;
- vram_addr += PAGE_SIZE;
- }
- if (offset >= node->size) {
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
+ amdgpu_res_next(&cursor, PAGE_SIZE);
}
continue;
}
@@ -373,22 +329,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
src[i] >> PAGE_SHIFT, page_to_pfn(spage));
- if (j + offset >= node->size - 1 && i < npages - 1) {
- r = svm_migrate_copy_memory_gart(adev, src + i - j,
- dst + i - j, j + 1,
- FROM_RAM_TO_VRAM,
- mfence);
- if (r)
- goto out_free_vram_pages;
-
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
- j = 0;
- } else {
- j++;
- }
+ amdgpu_res_next(&cursor, PAGE_SIZE);
+ j++;
}
r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
--
2.25.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram
2021-05-21 13:52 [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram Christian König
@ 2021-05-21 19:28 ` philip yang
2021-05-23 17:10 ` Christian König
0 siblings, 1 reply; 4+ messages in thread
From: philip yang @ 2021-05-21 19:28 UTC (permalink / raw)
To: Christian König, felix.kuehling; +Cc: amd-gfx
[-- Attachment #1: Type: text/html, Size: 6385 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram
2021-05-21 19:28 ` philip yang
@ 2021-05-23 17:10 ` Christian König
2021-05-25 13:59 ` philip yang
0 siblings, 1 reply; 4+ messages in thread
From: Christian König @ 2021-05-23 17:10 UTC (permalink / raw)
To: philip yang, felix.kuehling; +Cc: amd-gfx
Am 21.05.21 um 21:28 schrieb philip yang:
>
> This simplifies the logic; several comments inline.
>
> Thanks,
>
> Philip
>
> On 2021-05-21 9:52 a.m., Christian König wrote:
>> Access to the mm_node is now forbidden. So instead of hand wiring that
>> use the cursor functionality.
>>
>> Signed-off-by: Christian König<christian.koenig@amd.com>
>> ---
>> drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 76 +++---------------------
>> 1 file changed, 9 insertions(+), 67 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
>> index fd8f544f0de2..cb28d1e660af 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
>> @@ -29,6 +29,7 @@
>> #include "amdgpu_object.h"
>> #include "amdgpu_vm.h"
>> #include "amdgpu_mn.h"
>> +#include "amdgpu_res_cursor.h"
>> #include "kfd_priv.h"
>> #include "kfd_svm.h"
>> #include "kfd_migrate.h"
>> @@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
>> return r;
>> }
>>
>> -static uint64_t
>> -svm_migrate_node_physical_addr(struct amdgpu_device *adev,
>> - struct drm_mm_node **mm_node, uint64_t *offset)
>> -{
>> - struct drm_mm_node *node = *mm_node;
>> - uint64_t pos = *offset;
>> -
>> - if (node->start == AMDGPU_BO_INVALID_OFFSET) {
>> - pr_debug("drm node is not validated\n");
>> - return 0;
>> - }
>> -
>> - pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
>> - node->size);
>> -
>> - if (pos >= node->size) {
>> - do {
>> - pos -= node->size;
>> - node++;
>> - } while (pos >= node->size);
>> -
>> - *mm_node = node;
>> - *offset = pos;
>> - }
>> -
>> - return (node->start + pos) << PAGE_SHIFT;
>> -}
>> -
>> unsigned long
>> svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
>> {
>> @@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>> {
>> uint64_t npages = migrate->cpages;
>> struct device *dev = adev->dev;
>> - struct drm_mm_node *node;
>> + struct amdgpu_res_cursor cursor;
>> dma_addr_t *src;
>> uint64_t *dst;
>> - uint64_t vram_addr;
>> - uint64_t offset;
>> uint64_t i, j;
>> int r;
>>
>> @@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>> goto out;
>> }
>>
>> - node = prange->ttm_res->mm_node;
>> - offset = prange->offset;
>> - vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
>> - if (!vram_addr) {
> The prange->ttm_res valid check is not needed because we already check
> svm_range_vram_node_new return value
>> - WARN_ONCE(1, "vram node address is 0\n");
>> - r = -ENOMEM;
>> - goto out;
>> - }
>> -
>> + amdgpu_res_first(prange->ttm_res, prange->offset, npages << PAGE_SHIFT,
>
> prange->offset<< PAGE_SHIFT
>
> amdgpu_res_first takes start and size in bytes, while prange->offset uses a
> page-aligned offset
>
Ah, yes good point.
>
>> + &cursor);
>> for (i = j = 0; i < npages; i++) {
>> struct page *spage;
>>
>> - dst[i] = vram_addr + (j << PAGE_SHIFT);
>> + dst[i] = cursor.start + (j << PAGE_SHIFT);
>> migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
>> svm_migrate_get_vram_page(prange, migrate->dst[i]);
>>
>> @@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>> mfence);
>> if (r)
>> goto out_free_vram_pages;
>> - offset += j;
>> - vram_addr = (node->start + offset) << PAGE_SHIFT;
>> + amdgpu_res_next(&cursor, j << PAGE_SHIFT);
>> j = 0;
>> } else {
>> - offset++;
>> - vram_addr += PAGE_SIZE;
>> - }
>> - if (offset >= node->size) {
>> - node++;
>> - pr_debug("next node size 0x%llx\n", node->size);
>> - vram_addr = node->start << PAGE_SHIFT;
>> - offset = 0;
>> + amdgpu_res_next(&cursor, PAGE_SIZE);
>> }
>> continue;
>> }
>> @@ -373,22 +329,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
>> pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
>> src[i] >> PAGE_SHIFT, page_to_pfn(spage));
>>
>> - if (j + offset >= node->size - 1 && i < npages - 1) {
>> - r = svm_migrate_copy_memory_gart(adev, src + i - j,
>> - dst + i - j, j + 1,
>> - FROM_RAM_TO_VRAM,
>> - mfence);
>> - if (r)
>> - goto out_free_vram_pages;
>> -
>> - node++;
>> - pr_debug("next node size 0x%llx\n", node->size);
>> - vram_addr = node->start << PAGE_SHIFT;
>> - offset = 0;
>> - j = 0;
>> - } else {
>> - j++;
>> - }
>> + amdgpu_res_next(&cursor, PAGE_SIZE);
>> + j++;
> Here to handle cross mm_node case.
>
> if (j >= cursor->size - 1 && i < npages - 1) {
>
> r = svm_migrate_copy_memory_gart(adev, src + i - j,
>
> dst + i - j, j + 1,
>
> FROM_RAM_TO_VRAM,
> mfence);
> if (r)
> goto out_free_vram_pages;
>
> amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
> j= 0;
> } else {
> j++;
> }
Yeah, that was the point I couldn't understand. Why would we want that
anyway?
Regards,
Christian.
>
>
>> }
>>
>> r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram
2021-05-23 17:10 ` Christian König
@ 2021-05-25 13:59 ` philip yang
0 siblings, 0 replies; 4+ messages in thread
From: philip yang @ 2021-05-25 13:59 UTC (permalink / raw)
To: Christian König, felix.kuehling; +Cc: amd-gfx
[-- Attachment #1: Type: text/html, Size: 16067 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-05-25 13:59 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-05-21 13:52 [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram Christian König
2021-05-21 19:28 ` philip yang
2021-05-23 17:10 ` Christian König
2021-05-25 13:59 ` philip yang
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.