amd-gfx.lists.freedesktop.org archive mirror
* [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
@ 2021-04-20  1:52 Alex Sierra
  2021-04-21  0:16 ` philip yang
  2021-04-21  0:45 ` Felix Kuehling
  0 siblings, 2 replies; 11+ messages in thread
From: Alex Sierra @ 2021-04-20  1:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alex Sierra

SVM ranges are created for unregistered memory, triggered
by page faults. These ranges are migrated/mapped to
GPU VRAM memory.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 103 ++++++++++++++++++++++++++-
 1 file changed, 101 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 45dd055118eb..a8a92c533cf7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2179,6 +2179,84 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 	return -1;
 }
+static int
+svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+				unsigned long *start, unsigned long *last)
+{
+	struct vm_area_struct *vma;
+	struct interval_tree_node *node;
+	unsigned long start_limit, end_limit;
+
+	vma = find_vma(p->mm, addr);
+	if (!vma || addr < vma->vm_start) {
+		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
+		return -EFAULT;
+	}
+	start_limit = max(vma->vm_start,
+			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
+	end_limit = min(vma->vm_end,
+			(unsigned long)ALIGN(addr + 1, 2UL << 20)) >> PAGE_SHIFT;
+	/* First range that starts after the fault address */
+	node = interval_tree_iter_first(&p->svms.objects, (addr >> PAGE_SHIFT) + 1, ULONG_MAX);
+	if (node) {
+		end_limit = min(end_limit, node->start);
+		/* Last range that ends before the fault address */
+		node = container_of(rb_prev(&node->rb), struct interval_tree_node, rb);
+	} else {
+		/* Last range must end before addr because there was no range after addr */
+		node = container_of(rb_last(&p->svms.objects.rb_root),
+				    struct interval_tree_node, rb);
+	}
+	if (node)
+		start_limit = max(start_limit, node->last + 1);
+
+	*start = start_limit;
+	*last = end_limit - 1;
+
+	pr_debug("vma start: %lx start: %lx vma end: %lx last: %lx\n",
+		  vma->vm_start >> PAGE_SHIFT, *start,
+		  vma->vm_end >> PAGE_SHIFT, *last);
+
+	return 0;
+
+}
+static struct
+svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
+						struct kfd_process *p,
+						struct mm_struct *mm,
+						int64_t addr)
+{
+	struct svm_range *prange = NULL;
+	struct svm_range_list *svms;
+	unsigned long start, last;
+	uint32_t gpuid, gpuidx;
+
+	if (svm_range_get_range_boundaries(p, addr << PAGE_SHIFT,
+					   &start, &last))
+		return NULL;
+
+	svms = &p->svms;
+	prange = svm_range_new(&p->svms, start, last);
+	if (!prange) {
+		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
+		goto out;
+	}
+	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+		pr_debug("failed to get gpuid from kgd\n");
+		svm_range_free(prange);
+		prange = NULL;
+		goto out;
+	}
+	prange->preferred_loc = gpuid;
+	prange->actual_loc = 0;
+	/* Guarantee prange will be migrated */
+	prange->validate_timestamp -= AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING;
+	svm_range_add_to_svms(prange);
+	svm_range_add_notifier_locked(mm, prange);
+
+out:
+	return prange;
+}
 
 /* svm_range_skip_recover - decide if prange can be recovered
  * @prange: svm range structure
@@ -2228,6 +2306,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	struct kfd_process *p;
 	uint64_t timestamp;
 	int32_t best_loc, gpuidx;
+	bool write_locked = false;
 	int r = 0;
 
 	p = kfd_lookup_process_by_pasid(pasid);
@@ -2251,14 +2330,34 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	}
 
 	mmap_read_lock(mm);
+retry_write_locked:
 	mutex_lock(&svms->lock);
 	prange = svm_range_from_addr(svms, addr, NULL);
 	if (!prange) {
 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
 			 svms, addr);
-		r = -EFAULT;
-		goto out_unlock_svms;
+		if (!write_locked) {
+			/* Need the write lock to create new range with MMU notifier.
+			 * Also flush pending deferred work to make sure the interval
+			 * tree is up to date before we add a new range
+			 */
+			mutex_unlock(&svms->lock);
+			mmap_read_unlock(mm);
+			svm_range_list_lock_and_flush_work(svms, mm);
+			write_locked = true;
+			goto retry_write_locked;
+		}
+		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
+		if (!prange) {
+			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
+			svms, addr);
+			mmap_write_downgrade(mm);
+			r = -EFAULT;
+			goto out_unlock_svms;
+		}
 	}
+	if (write_locked)
+		mmap_write_downgrade(mm);
 
 	mutex_lock(&prange->migrate_mutex);
 
-- 
2.17.1
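
For readers following the boundary logic in svm_range_get_range_boundaries()
above: the new range is clamped to at most one 2 MB-aligned window around the
faulting address, bounded by the enclosing VMA. A minimal userspace sketch of
that arithmetic (the macro definitions and the example addresses are
illustrative assumptions; the kernel provides its own):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assume 4 KiB pages */
	#define SZ_2M		(2UL << 20)
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define MAX(a, b)	((a) > (b) ? (a) : (b))
	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* hypothetical VMA bounds and faulting byte address */
		unsigned long vm_start = 0x7f0000201000UL;
		unsigned long vm_end   = 0x7f0000a00000UL;
		unsigned long addr     = 0x7f0000434000UL;

		/* clamp a 2 MB-aligned window around addr to the VMA, in pages */
		unsigned long start = MAX(vm_start, ALIGN_DOWN(addr, SZ_2M)) >> PAGE_SHIFT;
		unsigned long last  = (MIN(vm_end, ALIGN(addr + 1, SZ_2M)) >> PAGE_SHIFT) - 1;

		printf("page range: [0x%lx, 0x%lx]\n", start, last);
		return 0;
	}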

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-20  1:52 [PATCH] drm/amdkfd: svm ranges creation for unregistered memory Alex Sierra
@ 2021-04-21  0:16 ` philip yang
  2021-04-21  0:45 ` Felix Kuehling
  1 sibling, 0 replies; 11+ messages in thread
From: philip yang @ 2021-04-21  0:16 UTC (permalink / raw)
  To: Alex Sierra, amd-gfx

[-- Attachment #1: Type: text/html, Size: 7393 bytes --]

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-20  1:52 [PATCH] drm/amdkfd: svm ranges creation for unregistered memory Alex Sierra
  2021-04-21  0:16 ` philip yang
@ 2021-04-21  0:45 ` Felix Kuehling
  2021-04-21  1:25   ` Felix Kuehling
  1 sibling, 1 reply; 11+ messages in thread
From: Felix Kuehling @ 2021-04-21  0:45 UTC (permalink / raw)
  To: amd-gfx, Alex Sierra

On 2021-04-19 9:52 p.m., Alex Sierra wrote:
> SVM ranges are created for unregistered memory, triggered
> by page faults. These ranges are migrated/mapped to
> GPU VRAM memory.
>
> Signed-off-by: Alex Sierra <alex.sierra@amd.com>

This looks generally good to me. One more nit-pick inline in addition to
Philip's comments. And one question.


> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 103 ++++++++++++++++++++++++++-
>  1 file changed, 101 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 45dd055118eb..a8a92c533cf7 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -2179,6 +2179,84 @@ svm_range_best_restore_location(struct svm_range *prange,
>  
>  	return -1;
>  }
> +static int
> +svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
> +				unsigned long *start, unsigned long *last)
> +{
> +	struct vm_area_struct *vma;
> +	struct interval_tree_node *node;
> +	unsigned long start_limit, end_limit;
> +
> +	vma = find_vma(p->mm, addr);
> +	if (!vma || addr < vma->vm_start) {
> +		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
> +		return -EFAULT;
> +	}
> +	start_limit = max(vma->vm_start,
> +			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
> +	end_limit = min(vma->vm_end,
> +			(unsigned long)ALIGN(addr + 1, 2UL << 20)) >> PAGE_SHIFT;
> +	/* First range that starts after the fault address */
> +	node = interval_tree_iter_first(&p->svms.objects, (addr >> PAGE_SHIFT) + 1, ULONG_MAX);
> +	if (node) {
> +		end_limit = min(end_limit, node->start);
> +		/* Last range that ends before the fault address */
> +		node = container_of(rb_prev(&node->rb), struct interval_tree_node, rb);
> +	} else {
> +		/* Last range must end before addr because there was no range after addr */
> +		node = container_of(rb_last(&p->svms.objects.rb_root),
> +				    struct interval_tree_node, rb);
> +	}
> +	if (node)
> +		start_limit = max(start_limit, node->last + 1);
> +
> +	*start = start_limit;
> +	*last = end_limit - 1;
> +
> +	pr_debug("vma start: %lx start: %lx vma end: %lx last: %lx\n",
> +		  vma->vm_start >> PAGE_SHIFT, *start,
> +		  vma->vm_end >> PAGE_SHIFT, *last);
> +
> +	return 0;
> +
> +}
> +static struct
> +svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
> +						struct kfd_process *p,
> +						struct mm_struct *mm,
> +						int64_t addr)
> +{
> +	struct svm_range *prange = NULL;
> +	struct svm_range_list *svms;
> +	unsigned long start, last;
> +	uint32_t gpuid, gpuidx;
> +
> +	if (svm_range_get_range_boundaries(p, addr << PAGE_SHIFT,
> +					   &start, &last))
> +		return NULL;
> +
> +	svms = &p->svms;
> +	prange = svm_range_new(&p->svms, start, last);
> +	if (!prange) {
> +		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
> +		goto out;

You can just return here, since you're not doing any cleanup at the out:
label.


> +	}
> +	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
> +		pr_debug("failed to get gpuid from kgd\n");
> +		svm_range_free(prange);
> +		prange = NULL;
> +		goto out;

Just return.


> +	}
> +	prange->preferred_loc = gpuid;
> +	prange->actual_loc = 0;
> +	/* Guarantee prange will be migrated */
> +	prange->validate_timestamp -= AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING;

Is this really specific to svm_range_create_unregistered_range? Or
should we always do this in svm_range_new to guarantee that new ranges
can get validated?

Regards,
  Felix


> +	svm_range_add_to_svms(prange);
> +	svm_range_add_notifier_locked(mm, prange);
> +
> +out:
> +	return prange;
> +}
>  
>  /* svm_range_skip_recover - decide if prange can be recovered
>   * @prange: svm range structure
> @@ -2228,6 +2306,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>  	struct kfd_process *p;
>  	uint64_t timestamp;
>  	int32_t best_loc, gpuidx;
> +	bool write_locked = false;
>  	int r = 0;
>  
>  	p = kfd_lookup_process_by_pasid(pasid);
> @@ -2251,14 +2330,34 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>  	}
>  
>  	mmap_read_lock(mm);
> +retry_write_locked:
>  	mutex_lock(&svms->lock);
>  	prange = svm_range_from_addr(svms, addr, NULL);
>  	if (!prange) {
>  		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
>  			 svms, addr);
> -		r = -EFAULT;
> -		goto out_unlock_svms;
> +		if (!write_locked) {
> +			/* Need the write lock to create new range with MMU notifier.
> +			 * Also flush pending deferred work to make sure the interval
> +			 * tree is up to date before we add a new range
> +			 */
> +			mutex_unlock(&svms->lock);
> +			mmap_read_unlock(mm);
> +			svm_range_list_lock_and_flush_work(svms, mm);
> +			write_locked = true;
> +			goto retry_write_locked;
> +		}
> +		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
> +		if (!prange) {
> +			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
> +			svms, addr);
> +			mmap_write_downgrade(mm);
> +			r = -EFAULT;
> +			goto out_unlock_svms;
> +		}
>  	}
> +	if (write_locked)
> +		mmap_write_downgrade(mm);
>  
>  	mutex_lock(&prange->migrate_mutex);
>  

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-21  0:45 ` Felix Kuehling
@ 2021-04-21  1:25   ` Felix Kuehling
  2021-04-22 13:08     ` philip yang
  0 siblings, 1 reply; 11+ messages in thread
From: Felix Kuehling @ 2021-04-21  1:25 UTC (permalink / raw)
  To: amd-gfx, Alex Sierra, Yang, Philip


On 2021-04-20 8:45 p.m., Felix Kuehling wrote:
> On 2021-04-19 9:52 p.m., Alex Sierra wrote:
>> SVM ranges are created for unregistered memory, triggered
>> by page faults. These ranges are migrated/mapped to
>> GPU VRAM memory.
>>
>> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> This looks generally good to me. One more nit-pick inline in addition to
> Philip's comments. And one question.

I found another potential deadlock. See inline. [+Philip]


>
>
>> ---
>>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 103 ++++++++++++++++++++++++++-
>>  1 file changed, 101 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> index 45dd055118eb..a8a92c533cf7 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> @@ -2179,6 +2179,84 @@ svm_range_best_restore_location(struct svm_range *prange,
>>  
>>  	return -1;
>>  }
>> +static int
>> +svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
>> +				unsigned long *start, unsigned long *last)
>> +{
>> +	struct vm_area_struct *vma;
>> +	struct interval_tree_node *node;
>> +	unsigned long start_limit, end_limit;
>> +
>> +	vma = find_vma(p->mm, addr);
>> +	if (!vma || addr < vma->vm_start) {
>> +		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
>> +		return -EFAULT;
>> +	}
>> +	start_limit = max(vma->vm_start,
>> +			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
>> +	end_limit = min(vma->vm_end,
>> +			(unsigned long)ALIGN(addr + 1, 2UL << 20)) >> PAGE_SHIFT;
>> +	/* First range that starts after the fault address */
>> +	node = interval_tree_iter_first(&p->svms.objects, (addr >> PAGE_SHIFT) + 1, ULONG_MAX);
>> +	if (node) {
>> +		end_limit = min(end_limit, node->start);
>> +		/* Last range that ends before the fault address */
>> +		node = container_of(rb_prev(&node->rb), struct interval_tree_node, rb);
>> +	} else {
>> +		/* Last range must end before addr because there was no range after addr */
>> +		node = container_of(rb_last(&p->svms.objects.rb_root),
>> +				    struct interval_tree_node, rb);
>> +	}
>> +	if (node)
>> +		start_limit = max(start_limit, node->last + 1);
>> +
>> +	*start = start_limit;
>> +	*last = end_limit - 1;
>> +
>> +	pr_debug("vma start: %lx start: %lx vma end: %lx last: %lx\n",
>> +		  vma->vm_start >> PAGE_SHIFT, *start,
>> +		  vma->vm_end >> PAGE_SHIFT, *last);
>> +
>> +	return 0;
>> +
>> +}
>> +static struct
>> +svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
>> +						struct kfd_process *p,
>> +						struct mm_struct *mm,
>> +						int64_t addr)
>> +{
>> +	struct svm_range *prange = NULL;
>> +	struct svm_range_list *svms;
>> +	unsigned long start, last;
>> +	uint32_t gpuid, gpuidx;
>> +
>> +	if (svm_range_get_range_boundaries(p, addr << PAGE_SHIFT,
>> +					   &start, &last))
>> +		return NULL;
>> +
>> +	svms = &p->svms;
>> +	prange = svm_range_new(&p->svms, start, last);
>> +	if (!prange) {
>> +		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
>> +		goto out;
> You can just return here, since you're not doing any cleanup at the out:
> label.
>
>
>> +	}
>> +	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
>> +		pr_debug("failed to get gpuid from kgd\n");
>> +		svm_range_free(prange);
>> +		prange = NULL;
>> +		goto out;
> Just return.
>
>
>> +	}
>> +	prange->preferred_loc = gpuid;
>> +	prange->actual_loc = 0;
>> +	/* Guarantee prange will be migrated */
>> +	prange->validate_timestamp -= AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING;
> Is this really specific to svm_range_create_unregistered_range? Or
> should we always do this in svm_range_new to guarantee that new ranges
> can get validated?
>
> Regards,
>   Felix
>
>
>> +	svm_range_add_to_svms(prange);
>> +	svm_range_add_notifier_locked(mm, prange);
>> +
>> +out:
>> +	return prange;
>> +}
>>  
>>  /* svm_range_skip_recover - decide if prange can be recovered
>>   * @prange: svm range structure
>> @@ -2228,6 +2306,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>>  	struct kfd_process *p;
>>  	uint64_t timestamp;
>>  	int32_t best_loc, gpuidx;
>> +	bool write_locked = false;
>>  	int r = 0;
>>  
>>  	p = kfd_lookup_process_by_pasid(pasid);
>> @@ -2251,14 +2330,34 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>>  	}
>>  
>>  	mmap_read_lock(mm);
>> +retry_write_locked:
>>  	mutex_lock(&svms->lock);
>>  	prange = svm_range_from_addr(svms, addr, NULL);
>>  	if (!prange) {
>>  		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
>>  			 svms, addr);
>> -		r = -EFAULT;
>> -		goto out_unlock_svms;
>> +		if (!write_locked) {
>> +			/* Need the write lock to create new range with MMU notifier.
>> +			 * Also flush pending deferred work to make sure the interval
>> +			 * tree is up to date before we add a new range
>> +			 */
>> +			mutex_unlock(&svms->lock);
>> +			mmap_read_unlock(mm);
>> +			svm_range_list_lock_and_flush_work(svms, mm);

I think this can deadlock with a deferred worker trying to drain
interrupts (Philip's patch series). If we cannot flush deferred work
here, we need to be more careful creating new ranges to make sure they
don't conflict with added deferred or child ranges.

Regards,
  Felix


>> +			write_locked = true;
>> +			goto retry_write_locked;
>> +		}
>> +		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
>> +		if (!prange) {
>> +			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
>> +			svms, addr);
>> +			mmap_write_downgrade(mm);
>> +			r = -EFAULT;
>> +			goto out_unlock_svms;
>> +		}
>>  	}
>> +	if (write_locked)
>> +		mmap_write_downgrade(mm);
>>  
>>  	mutex_lock(&prange->migrate_mutex);
>>  

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-21  1:25   ` Felix Kuehling
@ 2021-04-22 13:08     ` philip yang
  2021-04-22 13:20       ` Felix Kuehling
  0 siblings, 1 reply; 11+ messages in thread
From: philip yang @ 2021-04-22 13:08 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx, Alex Sierra, Yang, Philip

[-- Attachment #1: Type: text/html, Size: 8610 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-22 13:08     ` philip yang
@ 2021-04-22 13:20       ` Felix Kuehling
  2021-04-22 15:16         ` philip yang
  0 siblings, 1 reply; 11+ messages in thread
From: Felix Kuehling @ 2021-04-22 13:20 UTC (permalink / raw)
  To: philip yang, amd-gfx, Alex Sierra, Yang, Philip

On 2021-04-22 9:08 a.m., philip yang wrote:
>
>
> On 2021-04-20 9:25 p.m., Felix Kuehling wrote:
> @@ -2251,14 +2330,34 @@ svm_range_restore_pages(struct amdgpu_device
> *adev, unsigned int pasid,
>>>>  	}
>>>>  
>>>>  	mmap_read_lock(mm);
>>>> +retry_write_locked:
>>>>  	mutex_lock(&svms->lock);
>>>>  	prange = svm_range_from_addr(svms, addr, NULL);
>>>>  	if (!prange) {
>>>>  		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
>>>>  			 svms, addr);
>>>> -		r = -EFAULT;
>>>> -		goto out_unlock_svms;
>>>> +		if (!write_locked) {
>>>> +			/* Need the write lock to create new range with MMU notifier.
>>>> +			 * Also flush pending deferred work to make sure the interval
>>>> +			 * tree is up to date before we add a new range
>>>> +			 */
>>>> +			mutex_unlock(&svms->lock);
>>>> +			mmap_read_unlock(mm);
>>>> +			svm_range_list_lock_and_flush_work(svms, mm);
>> I think this can deadlock with a deferred worker trying to drain
>> interrupts (Philip's patch series). If we cannot flush deferred work
>> here, we need to be more careful creating new ranges to make sure they
>> don't conflict with added deferred or child ranges.
>
> It's impossible to deadlock with the deferred worker draining
> interrupts, because the interrupt drain waits for restore_pages without
> taking any lock, and restore_pages flushes the deferred work without
> taking any lock either.
>
The deadlock does not come from holding or waiting for locks. It comes
from the worker waiting for interrupts to drain and the interrupt
handler waiting for the worker to finish with flush_work in
svm_range_list_lock_and_flush_work. If both are waiting for each other,
neither can make progress and you have a deadlock.
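
As a rough userspace analogy of this circular wait (an illustrative sketch
only, not kernel code): one thread stands in for the deferred worker draining
faults, the other for the fault handler flushing the worker, and each spins on
a flag that only the other one would set:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int faults_drained;	/* would be set by the fault handler */
	static atomic_int worker_done;		/* would be set by the deferred worker */

	/* "deferred worker": drains pending faults before touching the tree */
	static void *deferred_worker(void *arg)
	{
		while (!atomic_load(&faults_drained))	/* wait for the drain to finish */
			usleep(1000);
		atomic_store(&worker_done, 1);
		return NULL;
	}

	/* "fault handler": flush_work() before adding a new range */
	static void *fault_handler(void *arg)
	{
		while (!atomic_load(&worker_done))	/* wait for the worker to finish */
			usleep(1000);
		atomic_store(&faults_drained, 1);	/* never reached */
		return NULL;
	}

	int main(void)
	{
		pthread_t w, f;

		pthread_create(&w, NULL, deferred_worker, NULL);
		pthread_create(&f, NULL, fault_handler, NULL);
		sleep(1);
		printf("after 1s: faults_drained=%d worker_done=%d -> circular wait\n",
		       atomic_load(&faults_drained), atomic_load(&worker_done));
		return 0;	/* process exit tears down the stuck threads */
	}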

Regards,
  Felix


> Regards,
>
> Philip
>
>> Regards,
>>   Felix
>>
>>
>>>> +			write_locked = true;
>>>> +			goto retry_write_locked;
>>>> +		}
>>>> +		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
>>>> +		if (!prange) {
>>>> +			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
>>>> +			svms, addr);
>>>> +			mmap_write_downgrade(mm);
>>>> +			r = -EFAULT;
>>>> +			goto out_unlock_svms;
>>>> +		}
>>>>  	}
>>>> +	if (write_locked)
>>>> +		mmap_write_downgrade(mm);
>>>>  
>>>>  	mutex_lock(&prange->migrate_mutex);
>>>>  

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-22 13:20       ` Felix Kuehling
@ 2021-04-22 15:16         ` philip yang
  0 siblings, 0 replies; 11+ messages in thread
From: philip yang @ 2021-04-22 15:16 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx, Alex Sierra, Yang, Philip

[-- Attachment #1: Type: text/html, Size: 4076 bytes --]

* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-22 14:47 Alex Sierra
@ 2021-04-22 15:31 ` Felix Kuehling
  0 siblings, 0 replies; 11+ messages in thread
From: Felix Kuehling @ 2021-04-22 15:31 UTC (permalink / raw)
  To: Alex Sierra, amd-gfx


On 2021-04-22 10:47 a.m., Alex Sierra wrote:
> SVM ranges are created for unregistered memory, triggered
> by page faults. These ranges are migrated/mapped to
> GPU VRAM memory.
>
> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 107 ++++++++++++++++++++++++++-
>  1 file changed, 104 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 45dd055118eb..44ff643e3c32 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -274,7 +274,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
>  	INIT_LIST_HEAD(&prange->deferred_list);
>  	INIT_LIST_HEAD(&prange->child_list);
>  	atomic_set(&prange->invalid, 0);
> -	prange->validate_timestamp = ktime_to_us(ktime_get());
> +	prange->validate_timestamp = 0;
>  	mutex_init(&prange->migrate_mutex);
>  	mutex_init(&prange->lock);
>  	svm_range_set_default_attributes(&prange->preferred_loc,
> @@ -2179,6 +2179,86 @@ svm_range_best_restore_location(struct svm_range *prange,
>  
>  	return -1;
>  }
> +static int
> +svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
> +				unsigned long *start, unsigned long *last)
> +{
> +	struct vm_area_struct *vma;
> +	struct interval_tree_node *node;
> +	unsigned long start_limit, end_limit;
> +
> +	vma = find_vma(p->mm, addr << PAGE_SHIFT);
> +	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
> +		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
> +		return -EFAULT;
> +	}
> +	start_limit = max(vma->vm_start >> PAGE_SHIFT,
> +		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
> +	end_limit = min(vma->vm_end >> PAGE_SHIFT,
> +		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
> +	/* First range that starts after the fault address */
> +	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
> +	if (node) {
> +		end_limit = min(end_limit, node->start);
> +		/* Last range that ends before the fault address */
> +		node = container_of(rb_prev(&node->rb),
> +				    struct interval_tree_node, rb);
> +	} else {
> +		/* Last range must end before addr because
> +		 * there was no range after addr
> +		 */
> +		node = container_of(rb_last(&p->svms.objects.rb_root),
> +				    struct interval_tree_node, rb);
> +	}
> +	if (node) {
> +		if (node->last >= addr) {
> +			WARN(1, "Overlap with prev node and page fault addr\n");
> +			return -EFAULT;
> +		}
> +		start_limit = max(start_limit, node->last + 1);
> +	}
> +
> +	*start = start_limit;
> +	*last = end_limit - 1;
> +
> +	pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
> +		  vma->vm_start >> PAGE_SHIFT, *start,
> +		  vma->vm_end >> PAGE_SHIFT, *last);
> +
> +	return 0;
> +
> +}
> +static struct
> +svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
> +						struct kfd_process *p,
> +						struct mm_struct *mm,
> +						int64_t addr)
> +{
> +	struct svm_range *prange = NULL;
> +	unsigned long start, last;
> +	uint32_t gpuid, gpuidx;
> +
> +	if (svm_range_get_range_boundaries(p, addr, &start, &last))
> +		return NULL;
> +
> +	prange = svm_range_new(&p->svms, start, last);
> +	if (!prange) {
> +		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
> +		return NULL;
> +	}
> +	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
> +		pr_debug("failed to get gpuid from kgd\n");
> +		svm_range_free(prange);
> +		return NULL;
> +	}
> +	prange->preferred_loc = gpuid;
> +	prange->actual_loc = 0;
> +	/* Guarantee prange will be migrated */
> +	svm_range_add_to_svms(prange);
> +	svm_range_add_notifier_locked(mm, prange);
> +
> +	return prange;
> +}
>  
>  /* svm_range_skip_recover - decide if prange can be recovered
>   * @prange: svm range structure
> @@ -2228,6 +2308,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>  	struct kfd_process *p;
>  	uint64_t timestamp;
>  	int32_t best_loc, gpuidx;
> +	bool write_locked = false;
>  	int r = 0;
>  
>  	p = kfd_lookup_process_by_pasid(pasid);
> @@ -2251,14 +2332,34 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>  	}
>  
>  	mmap_read_lock(mm);
> +retry_write_locked:
>  	mutex_lock(&svms->lock);
>  	prange = svm_range_from_addr(svms, addr, NULL);
>  	if (!prange) {
>  		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
>  			 svms, addr);
> -		r = -EFAULT;
> -		goto out_unlock_svms;
> +		if (!write_locked) {
> +			/* Need the write lock to create new range with MMU notifier.
> +			 * Also flush pending deferred work to make sure the interval
> +			 * tree is up to date before we add a new range
> +			 */
> +			mutex_unlock(&svms->lock);
> +			mmap_read_unlock(mm);
> +			svm_range_list_lock_and_flush_work(svms, mm);

Replace svm_range_list_lock_and_flush_work with mmap_write_lock to avoid the
deadlock we discussed. With that fixed, the patch is

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>


> +			write_locked = true;
> +			goto retry_write_locked;
> +		}
> +		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
> +		if (!prange) {
> +			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
> +				 svms, addr);
> +			mmap_write_downgrade(mm);
> +			r = -EFAULT;
> +			goto out_unlock_svms;
> +		}
>  	}
> +	if (write_locked)
> +		mmap_write_downgrade(mm);
>  
>  	mutex_lock(&prange->migrate_mutex);
>  

* [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
@ 2021-04-22 14:47 Alex Sierra
  2021-04-22 15:31 ` Felix Kuehling
  0 siblings, 1 reply; 11+ messages in thread
From: Alex Sierra @ 2021-04-22 14:47 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alex Sierra

SVM ranges are created for unregistered memory, triggered
by page faults. These ranges are migrated/mapped to
GPU VRAM memory.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 107 ++++++++++++++++++++++++++-
 1 file changed, 104 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 45dd055118eb..44ff643e3c32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -274,7 +274,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
 	INIT_LIST_HEAD(&prange->deferred_list);
 	INIT_LIST_HEAD(&prange->child_list);
 	atomic_set(&prange->invalid, 0);
-	prange->validate_timestamp = ktime_to_us(ktime_get());
+	prange->validate_timestamp = 0;
 	mutex_init(&prange->migrate_mutex);
 	mutex_init(&prange->lock);
 	svm_range_set_default_attributes(&prange->preferred_loc,
@@ -2179,6 +2179,86 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 	return -1;
 }
+static int
+svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+				unsigned long *start, unsigned long *last)
+{
+	struct vm_area_struct *vma;
+	struct interval_tree_node *node;
+	unsigned long start_limit, end_limit;
+
+	vma = find_vma(p->mm, addr << PAGE_SHIFT);
+	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
+		return -EFAULT;
+	}
+	start_limit = max(vma->vm_start >> PAGE_SHIFT,
+		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
+	end_limit = min(vma->vm_end >> PAGE_SHIFT,
+		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
+	/* First range that starts after the fault address */
+	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
+	if (node) {
+		end_limit = min(end_limit, node->start);
+		/* Last range that ends before the fault address */
+		node = container_of(rb_prev(&node->rb),
+				    struct interval_tree_node, rb);
+	} else {
+		/* Last range must end before addr because
+		 * there was no range after addr
+		 */
+		node = container_of(rb_last(&p->svms.objects.rb_root),
+				    struct interval_tree_node, rb);
+	}
+	if (node) {
+		if (node->last >= addr) {
+			WARN(1, "Overlap with prev node and page fault addr\n");
+			return -EFAULT;
+		}
+		start_limit = max(start_limit, node->last + 1);
+	}
+
+	*start = start_limit;
+	*last = end_limit - 1;
+
+	pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
+		  vma->vm_start >> PAGE_SHIFT, *start,
+		  vma->vm_end >> PAGE_SHIFT, *last);
+
+	return 0;
+
+}
+static struct
+svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
+						struct kfd_process *p,
+						struct mm_struct *mm,
+						int64_t addr)
+{
+	struct svm_range *prange = NULL;
+	unsigned long start, last;
+	uint32_t gpuid, gpuidx;
+
+	if (svm_range_get_range_boundaries(p, addr, &start, &last))
+		return NULL;
+
+	prange = svm_range_new(&p->svms, start, last);
+	if (!prange) {
+		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
+		return NULL;
+	}
+	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+		pr_debug("failed to get gpuid from kgd\n");
+		svm_range_free(prange);
+		return NULL;
+	}
+	prange->preferred_loc = gpuid;
+	prange->actual_loc = 0;
+	/* Guarantee prange will be migrated */
+	svm_range_add_to_svms(prange);
+	svm_range_add_notifier_locked(mm, prange);
+
+	return prange;
+}
 
 /* svm_range_skip_recover - decide if prange can be recovered
  * @prange: svm range structure
@@ -2228,6 +2308,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	struct kfd_process *p;
 	uint64_t timestamp;
 	int32_t best_loc, gpuidx;
+	bool write_locked = false;
 	int r = 0;
 
 	p = kfd_lookup_process_by_pasid(pasid);
@@ -2251,14 +2332,34 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	}
 
 	mmap_read_lock(mm);
+retry_write_locked:
 	mutex_lock(&svms->lock);
 	prange = svm_range_from_addr(svms, addr, NULL);
 	if (!prange) {
 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
 			 svms, addr);
-		r = -EFAULT;
-		goto out_unlock_svms;
+		if (!write_locked) {
+			/* Need the write lock to create new range with MMU notifier.
+			 * Also flush pending deferred work to make sure the interval
+			 * tree is up to date before we add a new range
+			 */
+			mutex_unlock(&svms->lock);
+			mmap_read_unlock(mm);
+			svm_range_list_lock_and_flush_work(svms, mm);
+			write_locked = true;
+			goto retry_write_locked;
+		}
+		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
+		if (!prange) {
+			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
+				 svms, addr);
+			mmap_write_downgrade(mm);
+			r = -EFAULT;
+			goto out_unlock_svms;
+		}
 	}
+	if (write_locked)
+		mmap_write_downgrade(mm);
 
 	mutex_lock(&prange->migrate_mutex);
 
-- 
2.17.1


* Re: [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
  2021-04-19 17:24 Alex Sierra
@ 2021-04-19 20:34 ` Felix Kuehling
  0 siblings, 0 replies; 11+ messages in thread
From: Felix Kuehling @ 2021-04-19 20:34 UTC (permalink / raw)
  To: Alex Sierra, amd-gfx

On 2021-04-19 1:24 p.m., Alex Sierra wrote:
> SVM ranges are created for unregistered memory, triggered
> by page faults. These ranges are migrated/mapped to
> GPU VRAM memory.
>
> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 85 +++++++++++++++++++++++++++-
>  1 file changed, 82 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 45dd055118eb..4cbbfba01cae 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -2179,6 +2179,79 @@ svm_range_best_restore_location(struct svm_range *prange,
>  
>  	return -1;
>  }
> +static int
> +svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
> +				unsigned long *start, unsigned long *end)
> +{
> +	struct vm_area_struct *vma;
> +	unsigned long start_limit, end_limit;
> +
> +	vma = find_vma(p->mm, addr);
> +	if (!vma) {

This check is not correct. Look for other examples of find_vma in the
driver. It's possible that find_vma returns the first VMA that starts
after the specified address. The condition usually used after find_vma
is something like

	if (!vma || addr < vma->vm_start)
		return -EFAULT;


> +		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
> +		return -1;

Return a proper error code, i.e. -EFAULT;


> +	}
> +	start_limit = max(vma->vm_start,
> +			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
> +	addr >>= PAGE_SHIFT;
> +	*start = addr;
> +
> +	while (*start > start_limit &&
> +		!interval_tree_iter_first(&p->svms.objects, *start - 1, *start - 1))
> +		*start -= 1;

This loop doesn't really make sense. Calling interval_tree_iter_first in
a loop is weird. It would typically be called before a loop. In the loop
you'd call interval_tree_iter_next. But in this case you shouldn't need
a loop at all because you're just looking for one specific range.
Interval trees are supposed to make this more efficient than a linear
search.

I think what you want to do here is to find the last prange that ends
before addr. Something like this:

	start_limit = max(vma->vm_start,
			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
	end_limit = min(vma->vm_end,
			(unsigned long)ALIGN(addr + 1, 2UL << 20)) >> PAGE_SHIFT;
	/* First range that starts after the fault address */
	node = interval_tree_iter_first(&p->svms.objects, (addr >> PAGE_SHIFT) + 1, ULONG_MAX);
	if (node) {
		end_limit = min(end_limit, node->start);
		/* Last range that ends before the fault address */
		node = container_of(rb_prev(&node->rb), struct interval_tree_node, rb);
	} else {
		/* Last range must end before addr because there was no range after addr */
		node = container_of(rb_last(&p->svms.objects.rb_root), struct interval_tree_node, rb);
	}
	if (node)
		start_limit = max(start_limit, node->last + 1);


	*start = start_limit;
	*last = end_limit - 1;


> +
> +	end_limit = min(vma->vm_end >> PAGE_SHIFT,
> +			(*start + 0x200)) - 1;
> +
> +	*end = addr;
> +
> +	while (*end < end_limit &&
> +		!interval_tree_iter_first(&p->svms.objects, *end + 1, *end + 1))
> +		*end += 1;

See above. My code snippet already calculates both the start and end
without requiring any loops.


> +	pr_debug("vma start: %lx start: %lx vma end: %lx end: %lx\n",
> +		  vma->vm_start >> PAGE_SHIFT, *start,
> +		  vma->vm_end >> PAGE_SHIFT, *end);
> +
> +	return 0;
> +
> +}
> +static struct
> +svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
> +						struct kfd_process *p,
> +						struct mm_struct *mm,
> +						int64_t addr)
> +{
> +	struct svm_range *prange = NULL;
> +	struct svm_range_list *svms;
> +	unsigned long start, end;

Rename "end" to "last". "end" is typically used for an exclusive end
address (just outside the range). "last" is typically used for an
inclusive end address (the last address still inside the range). You're
using an inclusive end address, so this should be called "last" to avoid
confusion.


> +	uint32_t gpuid, gpuidx;
> +
> +	if (svm_range_get_range_boundaries(p, addr << PAGE_SHIFT,
> +					   &start, &end))
> +		return NULL;
> +
> +	svms = &p->svms;
> +	prange = svm_range_new(&p->svms, start, end);
> +	if (!prange) {
> +		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
> +		goto out;
> +	}
> +	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
> +		pr_debug("failed to get gpuid from kgd\n");
> +		svm_range_free(prange);
> +		prange = NULL;
> +		goto out;
> +	}
> +	prange->preferred_loc = gpuid;
> +	prange->actual_loc = 0;
> +	/* Gurantee prange is migrate it */
> +	/* Guarantee prange will be migrated */
> +	svm_range_add_to_svms(prange);
> +	svm_range_add_notifier_locked(mm, prange);
> +
> +out:
> +	return prange;
> +}
>  
>  /* svm_range_skip_recover - decide if prange can be recovered
>   * @prange: svm range structure
> @@ -2250,15 +2323,21 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>  		goto out;
>  	}
>  
> -	mmap_read_lock(mm);
> +	mmap_write_lock(mm);

I was hoping we could keep a fast-path for the common case that takes
only the mmap_read_lock. We only need the mmap_write_lock if we need to
register a new range. If we do need to take the write lock, we should
also flush deferred work. Otherwise the range lookups from the interval
tree above may return outdated results.

Something like this:

	bool write_locked = false;
	...
	mmap_read_lock(mm);
retry_write_locked:
	mutex_lock(&svms->lock);
 	prange = svm_range_from_addr(svms, addr, NULL);
 	if (!prange) {
		...
		if (!write_locked) {
			/* Need the write lock to create new range with MMU notifier.
			 * Also flush pending deferred work to make sure the interval
			 * tree is up to date before we add a new range
			 */
			mutex_unlock(&svms->lock);
			mmap_read_unlock(mm);
			svm_range_list_lock_and_flush_work(svms, mm);
			write_locked = true;
			goto retry_write_locked;
		}
		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
		...
	}
	if (write_locked)
		mmap_write_downgrade(mm);
	...

Regards,
  Felix
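
For reference, the same check-then-upgrade idea as a minimal userspace sketch
(illustrative assumptions: a plain array stands in for the SVM interval tree
and a pthread rwlock for the mmap lock; POSIX rwlocks cannot be downgraded, so
the sketch simply drops the write lock after inserting):

	#include <pthread.h>
	#include <stdio.h>

	#define MAX_RANGES	16

	struct range { unsigned long start, last; };	/* page numbers, inclusive */

	static struct range ranges[MAX_RANGES];
	static int nr_ranges;
	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* stands in for svm_range_from_addr() */
	static struct range *range_from_addr(unsigned long addr)
	{
		for (int i = 0; i < nr_ranges; i++)
			if (addr >= ranges[i].start && addr <= ranges[i].last)
				return &ranges[i];
		return NULL;
	}

	static struct range *lookup_or_create(unsigned long addr)
	{
		struct range *r;

		pthread_rwlock_rdlock(&map_lock);	/* fast path: read lock only */
		r = range_from_addr(addr);
		pthread_rwlock_unlock(&map_lock);
		if (r)
			return r;

		pthread_rwlock_wrlock(&map_lock);	/* slow path: take the write lock */
		r = range_from_addr(addr);		/* re-check: another thread may have raced us */
		if (!r && nr_ranges < MAX_RANGES) {
			/* register a new 512-page aligned range around the fault */
			ranges[nr_ranges].start = addr & ~0x1ffUL;
			ranges[nr_ranges].last = (addr & ~0x1ffUL) + 0x1ff;
			r = &ranges[nr_ranges++];
		}
		pthread_rwlock_unlock(&map_lock);
		return r;
	}

	int main(void)
	{
		struct range *r = lookup_or_create(0x12345);

		if (r)
			printf("page range [0x%lx, 0x%lx]\n", r->start, r->last);
		return 0;
	}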


>  	mutex_lock(&svms->lock);
>  	prange = svm_range_from_addr(svms, addr, NULL);
>  	if (!prange) {
>  		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
>  			 svms, addr);
> -		r = -EFAULT;
> -		goto out_unlock_svms;
> +		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
> +		if (!prange) {
> +			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
> +			svms, addr);
> +			mmap_write_downgrade(mm);
> +			goto out_unlock_svms;
> +		}
>  	}
> +	mmap_write_downgrade(mm);
>  
>  	mutex_lock(&prange->migrate_mutex);
>  

* [PATCH] drm/amdkfd: svm ranges creation for unregistered memory
@ 2021-04-19 17:24 Alex Sierra
  2021-04-19 20:34 ` Felix Kuehling
  0 siblings, 1 reply; 11+ messages in thread
From: Alex Sierra @ 2021-04-19 17:24 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alex Sierra

SVM ranges are created for unregistered memory, triggered
by page faults. These ranges are migrated/mapped to
GPU VRAM memory.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 85 +++++++++++++++++++++++++++-
 1 file changed, 82 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 45dd055118eb..4cbbfba01cae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2179,6 +2179,79 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 	return -1;
 }
+static int
+svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+				unsigned long *start, unsigned long *end)
+{
+	struct vm_area_struct *vma;
+	unsigned long start_limit, end_limit;
+
+	vma = find_vma(p->mm, addr);
+	if (!vma) {
+		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
+		return -1;
+	}
+	start_limit = max(vma->vm_start,
+			(unsigned long)ALIGN_DOWN(addr, 2UL << 20)) >> PAGE_SHIFT;
+	addr >>= PAGE_SHIFT;
+	*start = addr;
+
+	while (*start > start_limit &&
+		!interval_tree_iter_first(&p->svms.objects, *start - 1, *start - 1))
+		*start -= 1;
+
+	end_limit = min(vma->vm_end >> PAGE_SHIFT,
+			(*start + 0x200)) - 1;
+
+	*end = addr;
+
+	while (*end < end_limit &&
+		!interval_tree_iter_first(&p->svms.objects, *end + 1, *end + 1))
+		*end += 1;
+	pr_debug("vma start: %lx start: %lx vma end: %lx end: %lx\n",
+		  vma->vm_start >> PAGE_SHIFT, *start,
+		  vma->vm_end >> PAGE_SHIFT, *end);
+
+	return 0;
+
+}
+static struct
+svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
+						struct kfd_process *p,
+						struct mm_struct *mm,
+						int64_t addr)
+{
+	struct svm_range *prange = NULL;
+	struct svm_range_list *svms;
+	unsigned long start, end;
+	uint32_t gpuid, gpuidx;
+
+	if (svm_range_get_range_boundaries(p, addr << PAGE_SHIFT,
+					   &start, &end))
+		return NULL;
+
+	svms = &p->svms;
+	prange = svm_range_new(&p->svms, start, end);
+	if (!prange) {
+		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
+		goto out;
+	}
+	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+		pr_debug("failed to get gpuid from kgd\n");
+		svm_range_free(prange);
+		prange = NULL;
+		goto out;
+	}
+	prange->preferred_loc = gpuid;
+	prange->actual_loc = 0;
+	/* Guarantee prange will be migrated */
+	prange->validate_timestamp -= AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING;
+	svm_range_add_to_svms(prange);
+	svm_range_add_notifier_locked(mm, prange);
+
+out:
+	return prange;
+}
 
 /* svm_range_skip_recover - decide if prange can be recovered
  * @prange: svm range structure
@@ -2250,15 +2323,21 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 		goto out;
 	}
 
-	mmap_read_lock(mm);
+	mmap_write_lock(mm);
 	mutex_lock(&svms->lock);
 	prange = svm_range_from_addr(svms, addr, NULL);
 	if (!prange) {
 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
 			 svms, addr);
-		r = -EFAULT;
-		goto out_unlock_svms;
+		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
+		if (!prange) {
+			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
+			svms, addr);
+			mmap_write_downgrade(mm);
+			goto out_unlock_svms;
+		}
 	}
+	mmap_write_downgrade(mm);
 
 	mutex_lock(&prange->migrate_mutex);
 
-- 
2.17.1

