From: "Christian König" <ckoenig.leichtzumerken@gmail.com>
To: Felix Kuehling <felix.kuehling@amd.com>, amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/5] drm/amdgpu: re-apply "use the new cursor in the VM code" v2
Date: Fri, 30 Apr 2021 10:54:05 +0200	[thread overview]
Message-ID: <022dc479-53ad-eecb-1ac0-4156dadbd18d@gmail.com> (raw)
In-Reply-To: <63266df9-ba17-c53d-53b9-87331415f057@amd.com>



On 30.04.21 at 10:32, Felix Kuehling wrote:
> On 2021-04-27 at 6:54 a.m., Christian König wrote:
>> Now that we found the underlying problem we can re-apply this patch.
>>
>> This reverts commit 867fee7f8821ff42e7308088cf0c3450ac49c17c.
>>
>> v2: rebase on KFD changes
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>

Thanks! Is that just for this patch, or for the whole series?

Christian.

>
>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 55 +++++++++-----------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  2 +-
>>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c   |  3 +-
>>   3 files changed, 20 insertions(+), 40 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index dae51992c607..fa43d332a979 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -37,6 +37,7 @@
>>   #include "amdgpu_gmc.h"
>>   #include "amdgpu_xgmi.h"
>>   #include "amdgpu_dma_buf.h"
>> +#include "amdgpu_res_cursor.h"
>>   #include "kfd_svm.h"
>>   
>>   /**
>> @@ -1606,7 +1607,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
>>    * @last: last mapped entry
>>    * @flags: flags for the entries
>>    * @offset: offset into nodes and pages_addr
>> - * @nodes: array of drm_mm_nodes with the MC addresses
>> + * @res: ttm_resource to map
>>    * @pages_addr: DMA addresses to use for mapping
>>    * @fence: optional resulting fence
>>    *
>> @@ -1621,13 +1622,13 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   				bool unlocked, struct dma_resv *resv,
>>   				uint64_t start, uint64_t last,
>>   				uint64_t flags, uint64_t offset,
>> -				struct drm_mm_node *nodes,
>> +				struct ttm_resource *res,
>>   				dma_addr_t *pages_addr,
>>   				struct dma_fence **fence)
>>   {
>>   	struct amdgpu_vm_update_params params;
>> +	struct amdgpu_res_cursor cursor;
>>   	enum amdgpu_sync_mode sync_mode;
>> -	uint64_t pfn;
>>   	int r;
>>   
>>   	memset(&params, 0, sizeof(params));
>> @@ -1645,14 +1646,6 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   	else
>>   		sync_mode = AMDGPU_SYNC_EXPLICIT;
>>   
>> -	pfn = offset >> PAGE_SHIFT;
>> -	if (nodes) {
>> -		while (pfn >= nodes->size) {
>> -			pfn -= nodes->size;
>> -			++nodes;
>> -		}
>> -	}
>> -
>>   	amdgpu_vm_eviction_lock(vm);
>>   	if (vm->evicting) {
>>   		r = -EBUSY;
>> @@ -1671,23 +1664,17 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   	if (r)
>>   		goto error_unlock;
>>   
>> -	do {
>> +	amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE,
>> +			 &cursor);
>> +	while (cursor.remaining) {
>>   		uint64_t tmp, num_entries, addr;
>>   
>> -
>> -		num_entries = last - start + 1;
>> -		if (nodes) {
>> -			addr = nodes->start << PAGE_SHIFT;
>> -			num_entries = min((nodes->size - pfn) *
>> -				AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries);
>> -		} else {
>> -			addr = 0;
>> -		}
>> -
>> +		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
>>   		if (pages_addr) {
>>   			bool contiguous = true;
>>   
>>   			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
>> +				uint64_t pfn = cursor.start >> PAGE_SHIFT;
>>   				uint64_t count;
>>   
>>   				contiguous = pages_addr[pfn + 1] ==
>> @@ -1707,16 +1694,18 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   			}
>>   
>>   			if (!contiguous) {
>> -				addr = pfn << PAGE_SHIFT;
>> +				addr = cursor.start;
>>   				params.pages_addr = pages_addr;
>>   			} else {
>> -				addr = pages_addr[pfn];
>> +				addr = pages_addr[cursor.start >> PAGE_SHIFT];
>>   				params.pages_addr = NULL;
>>   			}
>>   
>>   		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
>> -			addr += bo_adev->vm_manager.vram_base_offset;
>> -			addr += pfn << PAGE_SHIFT;
>> +			addr = bo_adev->vm_manager.vram_base_offset +
>> +				cursor.start;
>> +		} else {
>> +			addr = 0;
>>   		}
>>   
>>   		tmp = start + num_entries;
>> @@ -1724,14 +1713,9 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   		if (r)
>>   			goto error_unlock;
>>   
>> -		pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
>> -		if (nodes && nodes->size == pfn) {
>> -			pfn = 0;
>> -			++nodes;
>> -		}
>> +		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
>>   		start = tmp;
>> -
>> -	} while (unlikely(start != last + 1));
>> +	}
>>   
>>   	r = vm->update_funcs->commit(&params, fence);
>>   
>> @@ -1760,7 +1744,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
>>   	struct amdgpu_bo_va_mapping *mapping;
>>   	dma_addr_t *pages_addr = NULL;
>>   	struct ttm_resource *mem;
>> -	struct drm_mm_node *nodes;
>>   	struct dma_fence **last_update;
>>   	struct dma_resv *resv;
>>   	uint64_t flags;
>> @@ -1769,7 +1752,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
>>   
>>   	if (clear || !bo) {
>>   		mem = NULL;
>> -		nodes = NULL;
>>   		resv = vm->root.base.bo->tbo.base.resv;
>>   	} else {
>>   		struct drm_gem_object *obj = &bo->tbo.base;
>> @@ -1784,7 +1766,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
>>   				bo = gem_to_amdgpu_bo(gobj);
>>   		}
>>   		mem = &bo->tbo.mem;
>> -		nodes = mem->mm_node;
>>   		if (mem->mem_type == TTM_PL_TT)
>>   			pages_addr = bo->tbo.ttm->dma_address;
>>   	}
>> @@ -1833,7 +1814,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
>>   		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
>>   						resv, mapping->start,
>>   						mapping->last, update_flags,
>> -						mapping->offset, nodes,
>> +						mapping->offset, mem,
>>   						pages_addr, last_update);
>>   		if (r)
>>   			return r;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index e5a3f18be2b7..1ae5ea8db497 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -402,7 +402,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   				bool unlocked, struct dma_resv *resv,
>>   				uint64_t start, uint64_t last,
>>   				uint64_t flags, uint64_t offset,
>> -				struct drm_mm_node *nodes,
>> +				struct ttm_resource *res,
>>   				dma_addr_t *pages_addr,
>>   				struct dma_fence **fence);
>>   int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> index e4ce97ab6e26..0b0e76e16ddc 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> @@ -1151,8 +1151,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>   					prange->mapping.start,
>>   					prange->mapping.last, pte_flags,
>>   					prange->mapping.offset,
>> -					prange->ttm_res ?
>> -						prange->ttm_res->mm_node : NULL,
>> +					prange->ttm_res,
>>   					dma_addr, &vm->last_update);
>>   	if (r) {
>>   		pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
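
For readers skimming the diff above: the change replaces manual drm_mm_node
walking with the amdgpu_res_cursor helpers (amdgpu_res_first(),
cursor.remaining, amdgpu_res_next()). The standalone toy program below is only
a sketch of that iteration pattern outside the kernel; the toy_* types and
names are invented for illustration, and only the overall
first / remaining / next structure mirrors the calls shown in the patch.

/*
 * Toy re-implementation of the cursor iteration pattern.  A plain array of
 * blocks stands in for the backing ttm_resource so the sketch compiles on
 * its own; it assumes offset + size fit within the blocks.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_block {              /* stands in for one VRAM node */
	uint64_t start;
	uint64_t size;
};

struct toy_res_cursor {
	const struct toy_block *block;  /* current block */
	uint64_t start;                 /* address inside the current block */
	uint64_t size;                  /* bytes left in the current block */
	uint64_t remaining;             /* bytes left in the whole mapping */
};

/* Position the cursor "offset" bytes into the resource, covering "size" bytes. */
static void toy_res_first(const struct toy_block *blocks, uint64_t offset,
			  uint64_t size, struct toy_res_cursor *cur)
{
	while (offset >= blocks->size) {
		offset -= blocks->size;
		++blocks;
	}
	cur->block = blocks;
	cur->start = blocks->start + offset;
	cur->size = blocks->size - offset;
	cur->remaining = size;
	if (cur->size > size)
		cur->size = size;
}

/* Advance by "size" bytes, stepping to the next block when one is used up. */
static void toy_res_next(struct toy_res_cursor *cur, uint64_t size)
{
	cur->remaining -= size;
	cur->size -= size;
	if (!cur->size && cur->remaining) {
		++cur->block;
		cur->start = cur->block->start;
		cur->size = cur->block->size;
		if (cur->size > cur->remaining)
			cur->size = cur->remaining;
	} else {
		cur->start += size;
	}
}

int main(void)
{
	/* Two discontiguous 8 KiB blocks backing a 12 KiB mapping. */
	const struct toy_block blocks[] = {
		{ .start = 0x100000, .size = 0x2000 },
		{ .start = 0x300000, .size = 0x2000 },
	};
	struct toy_res_cursor cur;

	toy_res_first(blocks, 0x1000 /* offset */, 0x3000 /* size */, &cur);
	while (cur.remaining) {
		printf("map 0x%llx..0x%llx\n",
		       (unsigned long long)cur.start,
		       (unsigned long long)(cur.start + cur.size - 1));
		toy_res_next(&cur, cur.size);   /* consume the whole chunk */
	}
	return 0;
}

Built with any C compiler, it prints one line per contiguous chunk
(0x101000..0x101fff and 0x300000..0x301fff), roughly mirroring how the loop in
amdgpu_vm_bo_update_mapping() issues one page-table update per cursor chunk.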


Thread overview: 12+ messages
2021-04-27 10:54 [PATCH 1/5] drm/amdgpu: re-apply "use the new cursor in the VM code" v2 Christian König
2021-04-27 10:54 ` [PATCH 2/5] drm/amdgpu: use cursor functions in amdgpu_bo_in_cpu_visible_vram Christian König
2021-04-27 10:54 ` [PATCH 3/5] drm/amdgpu: set the contiguous flag if possible Christian König
2021-04-27 10:54 ` [PATCH 4/5] drm/amdgpu: check contiguous flags instead of mm_node Christian König
2021-04-27 10:54 ` [PATCH 5/5] drm/amdgpu: move struct amdgpu_vram_reservation into vram mgr Christian König
2021-04-30  8:32 ` [PATCH 1/5] drm/amdgpu: re-apply "use the new cursor in the VM code" v2 Felix Kuehling
2021-04-30  8:54   ` Christian König [this message]
2021-04-30 13:45     ` Felix Kuehling
2021-04-30 13:50       ` Christian König
2021-04-30 14:57         ` Felix Kuehling
2021-04-30 14:58           ` Christian König
2021-04-30 15:19 ` Nirmoy
