dri-devel.lists.freedesktop.org archive mirror
From: Felix Kuehling <felix.kuehling@amd.com>
To: philip yang <yangp@amd.com>,
	amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: christian.koenig@amd.com, tzimmermann@suse.de
Subject: Re: [PATCH 2/4] drm/amdkfd: Use drm_priv to pass VM from KFD to amdgpu
Date: Wed, 14 Apr 2021 11:47:13 -0400
Message-ID: <fc3cad63-088b-449e-d3e8-19d60fb4fb8c@amd.com>
In-Reply-To: <3813f130-a255-0d63-3b40-60919a1ba8f8@amd.com>

On 2021-04-14 at 11:21 a.m., philip yang wrote:
>
>
> On 2021-04-07 7:12 p.m., Felix Kuehling wrote:
>> amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu needs the drm_priv to allow mmap
>> to access the BO through the corresponding file descriptor.
>>
>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    | 14 ++--
>>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 69 +++++++++++--------
>>  drivers/gpu/drm/amd/amdkfd/kfd_process.c      |  5 +-
>>  3 files changed, 50 insertions(+), 38 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
>> index 5ffb07b02810..0d59bebd92af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
>> @@ -236,20 +236,20 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
>>  /* GPUVM API */
>>  int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
>>  					struct file *filp, u32 pasid,
>> -					void **vm, void **process_info,
>> +					void **process_info,
>>  					struct dma_fence **ef);
>> -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
>> -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
>> +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
>> +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
>>  int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
>>  		struct kgd_dev *kgd, uint64_t va, uint64_t size,
>> -		void *vm, struct kgd_mem **mem,
>> +		void *drm_priv, struct kgd_mem **mem,
>>  		uint64_t *offset, uint32_t flags);
>>  int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
>>  		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
>>  int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>> -		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
>> +		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
>>  int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>> -		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
>> +		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
>>  int amdgpu_amdkfd_gpuvm_sync_memory(
>>  		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
>>  int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
>> @@ -260,7 +260,7 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
>>  					      struct kfd_vm_fault_info *info);
>>  int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
>>  				      struct dma_buf *dmabuf,
>> -				      uint64_t va, void *vm,
>> +				      uint64_t va, void *drm_priv,
>>  				      struct kgd_mem **mem, uint64_t *size,
>>  				      uint64_t *mmap_offset);
>>  int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> index 36012229ccc1..95442bcd60fb 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
>> @@ -951,6 +951,13 @@ static int process_update_pds(struct amdkfd_process_info *process_info,
>>  	return 0;
>>  }
>>  
>> +static struct amdgpu_vm *drm_priv_to_vm(struct drm_file *drm_priv)
>> +{
>> +	struct amdgpu_fpriv *fpriv = drm_priv->driver_priv;
>> +
>> +	return &fpriv->vm;
>> +}
>> +
>>  static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
>>  		       struct dma_fence **ef)
>>  {
>> @@ -1039,15 +1046,19 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
>>  
>>  int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
>>  					   struct file *filp, u32 pasid,
>> -					   void **vm, void **process_info,
>> +					   void **process_info,
>>  					   struct dma_fence **ef)
>>  {
>>  	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> -	struct drm_file *drm_priv = filp->private_data;
>> -	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
>> -	struct amdgpu_vm *avm = &drv_priv->vm;
>> +	struct amdgpu_fpriv *drv_priv;
>> +	struct amdgpu_vm *avm;
>>  	int ret;
>>  
>> +	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
>> +	if (ret)
>> +		return ret;
>> +	avm = &drv_priv->vm;
>> +
>>  	/* Already a compute VM? */
>>  	if (avm->process_info)
>>  		return -EINVAL;
>> @@ -1062,7 +1073,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
>>  	if (ret)
>>  		return ret;
>>  
>> -	*vm = (void *)avm;
>> +	amdgpu_vm_set_task_info(avm);
>>  
>>  	return 0;
>>  }
>> @@ -1103,15 +1114,17 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
>>  	}
>>  }
>>  
>> -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
>> +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
>>  {
>>  	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> -	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
>> +	struct amdgpu_vm *avm;
>>  
>> -	if (WARN_ON(!kgd || !vm))
>> +	if (WARN_ON(!kgd || !drm_priv))
>>  		return;
>>  
>> -	pr_debug("Releasing process vm %p\n", vm);
>> +	avm = drm_priv_to_vm(drm_priv);
>> +
>> +	pr_debug("Releasing process vm %p\n", avm);
>>  
>>  	/* The original pasid of amdgpu vm has already been
>>  	 * released during making a amdgpu vm to a compute vm
>> @@ -1122,9 +1135,9 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
>>  	amdgpu_vm_release_compute(adev, avm);
>>  }
>>  
>> -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
>> +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
>>  {
>> -	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
>> +	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
>>  	struct amdgpu_bo *pd = avm->root.base.bo;
>>  	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
>>  
>> @@ -1135,11 +1148,11 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
>>  
>>  int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
>>  		struct kgd_dev *kgd, uint64_t va, uint64_t size,
>> -		void *vm, struct kgd_mem **mem,
>> +		void *drm_priv, struct kgd_mem **mem,
>>  		uint64_t *offset, uint32_t flags)
>>  {
>>  	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> -	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
>> +	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
>>  	enum ttm_bo_type bo_type = ttm_bo_type_device;
>>  	struct sg_table *sg = NULL;
>>  	uint64_t user_addr = 0;
>> @@ -1350,10 +1363,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
>>  }
>>  
>>  int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>> -		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
>> +		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
>>  {
>>  	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> -	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
>> +	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
>>  	int ret;
>>  	struct amdgpu_bo *bo;
>>  	uint32_t domain;
>> @@ -1394,9 +1407,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>>  	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
>>  			mem->va,
>>  			mem->va + bo_size * (1 + mem->aql_queue),
>> -			vm, domain_string(domain));
>> +			avm, domain_string(domain));
>>  
>> -	ret = reserve_bo_and_vm(mem, vm, &ctx);
>> +	ret = reserve_bo_and_vm(mem, avm, &ctx);
>>  	if (unlikely(ret))
>>  		goto out;
>>  
>> @@ -1440,7 +1453,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>>  	}
>>  
>>  	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
>> -		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
>> +		if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
>>  			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
>>  					entry->va, entry->va + bo_size,
>>  					entry);
>> @@ -1452,7 +1465,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>>  				goto map_bo_to_gpuvm_failed;
>>  			}
>>  
>> -			ret = vm_update_pds(vm, ctx.sync);
>> +			ret = vm_update_pds(avm, ctx.sync);
>>  			if (ret) {
>>  				pr_err("Failed to update page directories\n");
>>  				goto map_bo_to_gpuvm_failed;
>> @@ -1488,11 +1501,11 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>>  }
>>  
>>  int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>> -		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
>> +		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
>>  {
>>  	struct amdgpu_device *adev = get_amdgpu_device(kgd);
>> -	struct amdkfd_process_info *process_info =
>> -		((struct amdgpu_vm *)vm)->process_info;
>> +	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
>> +	struct amdkfd_process_info *process_info = avm->process_info;
>>  	unsigned long bo_size = mem->bo->tbo.base.size;
>>  	struct kfd_bo_va_list *entry;
>>  	struct bo_vm_reservation_context ctx;
>> @@ -1500,7 +1513,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>>  
>>  	mutex_lock(&mem->lock);
>>  
>> -	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
>> +	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
>>  	if (unlikely(ret))
>>  		goto out;
>>  	/* If no VMs were reserved, it means the BO wasn't actually mapped */
>> @@ -1509,17 +1522,17 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>>  		goto unreserve_out;
>>  	}
>>  
>> -	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
>> +	ret = vm_validate_pt_pd_bos(avm);
>>  	if (unlikely(ret))
>>  		goto unreserve_out;
>>  
>>  	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
>>  		mem->va,
>>  		mem->va + bo_size * (1 + mem->aql_queue),
>> -		vm);
>> +		avm);
>>  
>>  	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
>> -		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
>> +		if (entry->bo_va->base.vm == avm && entry->is_mapped) {
>>  			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
>>  					entry->va,
>>  					entry->va + bo_size,
>> @@ -1645,14 +1658,14 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
>>  
>>  int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
>>  				      struct dma_buf *dma_buf,
>> -				      uint64_t va, void *vm,
>> +				      uint64_t va, void *drm_priv,
>>  				      struct kgd_mem **mem, uint64_t *size,
>>  				      uint64_t *mmap_offset)
>>  {
>>  	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
>> +	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
>>  	struct drm_gem_object *obj;
>>  	struct amdgpu_bo *bo;
>> -	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
>>  
>>  	if (dma_buf->ops != &amdgpu_dmabuf_ops)
>>  		/* Can't handle non-graphics buffers */
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
>> index d97e330a5022..bad0ecd6ef87 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
>> @@ -1383,13 +1383,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
>>  
>>  	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
>>  		dev->kgd, drm_file, p->pasid,
>> -		&pdd->vm, &p->kgd_process_info, &p->ef);
>> +		&p->kgd_process_info, &p->ef);
>>  	if (ret) {
>>  		pr_err("Failed to create process VM object\n");
>>  		return ret;
>>  	}
>> -
>> -	amdgpu_vm_set_task_info(pdd->vm);
>> +	pdd->vm = drm_file->private_data;
>>  
>
> Maybe it would read better if we renamed pdd->vm to pdd->drm_priv as well?
>
I agree. I'll send out an update with that fixed.
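Roughly something like this (only a quick sketch of the idea, not the
actual v2 patch; all the other pdd->vm users would get the same rename):

	/* kfd_priv.h (sketch): rename the opaque handle so its meaning is clear */
	struct kfd_process_device {
		...
		/* DRM file-private data of the render node acquired for this
		 * process; amdgpu converts it back to the amdgpu_vm with
		 * drm_priv_to_vm().
		 */
		void *drm_priv;		/* was: void *vm */
		...
	};

	/* kfd_process_device_init_vm() (sketch) */
	pdd->drm_priv = drm_file->private_data;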

Regards,
  Felix


> Philip
>
>>  	ret = kfd_process_device_reserve_ib_mem(pdd);
>>  	if (ret)

Thread overview: 11+ messages
2021-04-07 23:12 [PATCH 1/4] drm/amdkfd: Remove legacy code not acquiring VMs Felix Kuehling
2021-04-07 23:12 ` [PATCH 2/4] drm/amdkfd: Use drm_priv to pass VM from KFD to amdgpu Felix Kuehling
2021-04-14 15:21   ` philip yang
2021-04-14 15:47     ` Felix Kuehling [this message]
2021-04-07 23:12 ` [PATCH 3/4] drm/amdkfd: Allow access for mmapping KFD BOs Felix Kuehling
2021-04-14  6:43   ` Christian König
2021-04-14 15:37   ` philip yang
2021-04-14 15:46     ` Felix Kuehling
2021-04-07 23:12 ` [PATCH 4/4] drm/amdgpu: Remove verify_access shortcut for " Felix Kuehling
2021-04-14 15:38   ` philip yang
2021-04-14 15:15 ` [PATCH 1/4] drm/amdkfd: Remove legacy code not acquiring VMs philip yang
