dri-devel.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
From: "Zeng, Oak" <Oak.Zeng@amd.com>
To: "Kuehling, Felix" <Felix.Kuehling@amd.com>,
	"amd-gfx@lists.freedesktop.org" <amd-gfx@lists.freedesktop.org>,
	"dri-devel@lists.freedesktop.org"
	<dri-devel@lists.freedesktop.org>
Subject: Re: [PATCH v2 06/10] drm/amdgpu: DMA map/unmap when updating GPU mappings
Date: Tue, 27 Apr 2021 00:23:31 +0000	[thread overview]
Message-ID: <95A9652F-CAC4-4D0B-82E8-7F84BABAB937@amd.com> (raw)
In-Reply-To: <20210422013058.6305-7-Felix.Kuehling@amd.com>



Regards,
Oak 

 

On 2021-04-21, 9:31 PM, "dri-devel on behalf of Felix Kuehling" <dri-devel-bounces@lists.freedesktop.org on behalf of Felix.Kuehling@amd.com> wrote:

    DMA map kfd_mem_attachments in update_gpuvm_pte. This function is called
    with the BO and page tables reserved, so we can safely update the DMA
    mapping.

    DMA unmap when a BO is unmapped from a GPU and before updating mappings
    in restore workers.

    Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
    ---
     .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 56 ++++++++++---------
     1 file changed, 29 insertions(+), 27 deletions(-)

    diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
    index 49d1af4aa5f1..7d25d886b98c 100644
    --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
    +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
    @@ -961,11 +961,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
     	return ret;
     }

    -static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
    +static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
     				struct kfd_mem_attachment *entry,
     				struct amdgpu_sync *sync)
     {
     	struct amdgpu_bo_va *bo_va = entry->bo_va;
    +	struct amdgpu_device *adev = entry->adev;
     	struct amdgpu_vm *vm = bo_va->base.vm;

     	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
    @@ -974,15 +975,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,

     	amdgpu_sync_fence(sync, bo_va->last_pt_update);

    -	return 0;
    +	kfd_mem_dmaunmap_attachment(mem, entry);
     }

    -static int update_gpuvm_pte(struct amdgpu_device *adev,
    -		struct kfd_mem_attachment *entry,
    -		struct amdgpu_sync *sync)
    +static int update_gpuvm_pte(struct kgd_mem *mem,
    +			    struct kfd_mem_attachment *entry,
    +			    struct amdgpu_sync *sync)
     {
    -	int ret;
     	struct amdgpu_bo_va *bo_va = entry->bo_va;
    +	struct amdgpu_device *adev = entry->adev;
    +	int ret;
    +
    +	ret = kfd_mem_dmamap_attachment(mem, entry);
Should the dma mapping be done in the kfd_mem_attach function when a memory object is attached to a vm for the first time? Since each memory object can be mapped to many GPUs or many VMs, doing the dma mapping the first time it is attached could simplify the logic. Or even simpler, maybe we can just dma map when a memory object is created - it wastes some iommu page table entries but really simplifies the logic in this patch series. I found this series is not very easy to understand.
    +	if (ret)
    +		return ret;

     	/* Update the page tables  */
     	ret = amdgpu_vm_bo_update(adev, bo_va, false);
    @@ -994,14 +1000,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
     	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
     }

    -static int map_bo_to_gpuvm(struct amdgpu_device *adev,
    -		struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
    -		bool no_update_pte)
    +static int map_bo_to_gpuvm(struct kgd_mem *mem,
    +			   struct kfd_mem_attachment *entry,
    +			   struct amdgpu_sync *sync,
    +			   bool no_update_pte)
     {
     	int ret;

     	/* Set virtual address for the allocation */
    -	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
    +	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
     			       amdgpu_bo_size(entry->bo_va->base.bo),
     			       entry->pte_flags);
     	if (ret) {
    @@ -1013,7 +1020,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
     	if (no_update_pte)
     		return 0;

    -	ret = update_gpuvm_pte(adev, entry, sync);
    +	ret = update_gpuvm_pte(mem, entry, sync);
     	if (ret) {
     		pr_err("update_gpuvm_pte() failed\n");
     		goto update_gpuvm_pte_failed;
    @@ -1022,7 +1029,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
     	return 0;

     update_gpuvm_pte_failed:
    -	unmap_bo_from_gpuvm(adev, entry, sync);
    +	unmap_bo_from_gpuvm(mem, entry, sync);
     	return ret;
     }

    @@ -1596,7 +1603,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
     		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
     			 entry->va, entry->va + bo_size, entry);

    -		ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
    +		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
     				      is_invalid_userptr);
     		if (ret) {
     			pr_err("Failed to map bo to gpuvm\n");
    @@ -1635,7 +1642,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
     int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
     		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
     {
    -	struct amdgpu_device *adev = get_amdgpu_device(kgd);
     	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
     	struct amdkfd_process_info *process_info = avm->process_info;
     	unsigned long bo_size = mem->bo->tbo.base.size;
    @@ -1670,13 +1676,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
     		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
     			 entry->va, entry->va + bo_size, entry);

    -		ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
    -		if (ret == 0) {
    -			entry->is_mapped = false;
    -		} else {
    -			pr_err("failed to unmap VA 0x%llx\n", mem->va);
    -			goto unreserve_out;
    -		}
    +		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
    +		entry->is_mapped = false;

     		mem->mapped_to_gpu_memory--;
     		pr_debug("\t DEC mapping count %d\n",
    @@ -2053,9 +2054,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
     			if (!attachment->is_mapped)
     				continue;

    -			ret = update_gpuvm_pte((struct amdgpu_device *)
    -					       attachment->adev,
    -					       attachment, &sync);
    +			kfd_mem_dmaunmap_attachment(mem, attachment);
    +			ret = update_gpuvm_pte(mem, attachment, &sync);
     			if (ret) {
     				pr_err("%s: update PTE failed\n", __func__);
     				/* make sure this gets validated again */
    @@ -2257,9 +2257,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
     			goto validate_map_fail;
     		}
     		list_for_each_entry(attachment, &mem->attachments, list) {
    -			ret = update_gpuvm_pte((struct amdgpu_device *)
    -					      attachment->adev, attachment,
    -					      &sync_obj);
    +			if (!attachment->is_mapped)
    +				continue;
    +
    +			kfd_mem_dmaunmap_attachment(mem, attachment);
    +			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
     			if (ret) {
     				pr_debug("Memory eviction: update PTE failed. Try again\n");
     				goto validate_map_fail;
    -- 
    2.31.1

    _______________________________________________
    dri-devel mailing list
    dri-devel@lists.freedesktop.org
    https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Fdri-devel&amp;data=04%7C01%7Coak.zeng%40amd.com%7C867f4b956e9d4d2e539208d9052e6140%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637546519028781182%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&amp;sdata=VM31e7X1NRqmm7u%2BLTzCqTO0c2fHa0j6PmIXT24eJY8%3D&amp;reserved=0

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

  reply	other threads:[~2021-04-27  0:23 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-04-22  1:30 [PATCH v2 00/10] Implement multi-GPU DMA mappings for KFD Felix Kuehling
2021-04-22  1:30 ` [PATCH v2 01/10] rock-dbg_defconfig: Enable Intel IOMMU Felix Kuehling
2021-04-22  1:30 ` [PATCH v2 02/10] drm/amdgpu: Rename kfd_bo_va_list to kfd_mem_attachment Felix Kuehling
2021-05-10 22:00   ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 03/10] drm/amdgpu: Keep a bo-reference per-attachment Felix Kuehling
2021-05-10 22:00   ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 04/10] drm/amdgpu: Simplify AQL queue mapping Felix Kuehling
2021-04-23  1:33   ` Zeng, Oak
2021-04-23  7:23     ` Felix Kuehling
2021-05-10 22:03       ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 05/10] drm/amdgpu: Add multi-GPU DMA mapping helpers Felix Kuehling
2021-04-27  0:09   ` Zeng, Oak
2021-04-27  3:41     ` Felix Kuehling
2021-05-10 22:05       ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 06/10] drm/amdgpu: DMA map/unmap when updating GPU mappings Felix Kuehling
2021-04-27  0:23   ` Zeng, Oak [this message]
2021-04-27  3:47     ` Felix Kuehling
2021-05-10 22:06       ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 07/10] drm/amdgpu: Move kfd_mem_attach outside reservation Felix Kuehling
2021-05-10 22:06   ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 08/10] drm/amdgpu: Add DMA mapping of GTT BOs Felix Kuehling
2021-04-27  0:35   ` Zeng, Oak
2021-04-27  3:56     ` Felix Kuehling
2021-04-27 14:29       ` Zeng, Oak
2021-04-27 15:08         ` Felix Kuehling
2021-05-10 22:07           ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 09/10] drm/ttm: Don't count pages in SG BOs against pages_limit Felix Kuehling
2021-05-10 22:08   ` Errabolu, Ramesh
2021-04-22  1:30 ` [PATCH v2 10/10] drm/amdgpu: Move dmabuf attach/detach to backend_(un)bind Felix Kuehling
2021-04-22 11:20   ` Christian König
2021-05-10 22:09     ` Errabolu, Ramesh
2021-04-27 15:16 ` [PATCH v2 00/10] Implement multi-GPU DMA mappings for KFD Zeng, Oak

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=95A9652F-CAC4-4D0B-82E8-7F84BABAB937@amd.com \
    --to=oak.zeng@amd.com \
    --cc=Felix.Kuehling@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).