* [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
@ 2017-09-11  7:53 Christian König
       [not found] ` <1505116409-9815-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2017-09-11  7:53 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Christian König <christian.koenig@amd.com>

All users of a VM must always wait for updates of always
valid BOs to be completed.

v2: remove debugging leftovers, rename struct member

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  2 +-
 3 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8aa37e0..4681dcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -752,10 +752,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
-	if (r)
-		return r;
-
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
@@ -810,6 +806,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	}
 
 	r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
+	if (r)
+		return r;
+
+	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+	if (r)
+		return r;
 
 	if (amdgpu_vm_debug && p->bo_list) {
 		/* Invalidate all BOs to test for userspace bugs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 55f1ecb..5042f09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1140,9 +1140,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 				goto error_free;
 
 			amdgpu_bo_fence(parent->base.bo, fence, true);
-			dma_fence_put(vm->last_dir_update);
-			vm->last_dir_update = dma_fence_get(fence);
-			dma_fence_put(fence);
+			dma_fence_put(vm->last_update);
+			vm->last_update = fence;
 		}
 	}
 
@@ -1803,6 +1802,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
+	if (bo_va->base.bo &&
+	    bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		dma_fence_put(vm->last_update);
+		vm->last_update = dma_fence_get(bo_va->last_pt_update);
+	}
+
 	return 0;
 }
 
@@ -2586,7 +2591,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
 		  "CPU update of VM recommended only for large BAR system\n");
-	vm->last_dir_update = NULL;
+	vm->last_update = NULL;
 
 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 			AMDGPU_GEM_CREATE_VRAM_CLEARED;
@@ -2692,7 +2697,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	amdgpu_vm_free_levels(&vm->root);
-	dma_fence_put(vm->last_dir_update);
+	dma_fence_put(vm->last_update);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		amdgpu_vm_free_reserved_vmid(adev, vm, i);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index c1accd1..cb6a622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -140,7 +140,7 @@ struct amdgpu_vm {
 
 	/* contains the page directory */
 	struct amdgpu_vm_pt     root;
-	struct dma_fence	*last_dir_update;
+	struct dma_fence	*last_update;
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
-- 
2.7.4



* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found] ` <1505116409-9815-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-11  8:59   ` zhoucm1
       [not found]     ` <5430f286-3089-1be2-98d3-a0a89eb67aee-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: zhoucm1 @ 2017-09-11  8:59 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

NAK. As Roger pointed out, the performance of Vulkan Dota2 drops from
72fps to 24fps.

This patch will kill the per-VM-BO advantage; performance even drops to
1/3 of the previous non-per-VM-BO path.

All moved and relocated fences have already been synced; we only need to
catch the VA mapping fence, which the CS itself requires. As I proposed
in another thread, we may need to expand the VA ioctl to return the
mapping fence to the UMD, which then passes it to the CS as a dependency.
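
(Illustrative only: a rough sketch of what such a VA ioctl extension
could look like. The struct name and the out_fence_syncobj field are
hypothetical and are not part of the real amdgpu UAPI; they just show
the idea of handing the mapping fence back to the UMD so it can be
passed to the CS as a dependency.)

	/* Hypothetical sketch -- NOT the real amdgpu UAPI. */
	#include <linux/types.h>

	struct drm_amdgpu_gem_va_with_fence {		/* hypothetical */
		__u32 handle;		/* GEM handle of the BO being mapped */
		__u32 operation;	/* map / unmap / replace / clear     */
		__u64 flags;
		__u64 va_address;	/* virtual address of the mapping    */
		__u64 offset_in_bo;
		__u64 map_size;
		/* new: sync object that signals once the page-table update
		 * for this mapping has completed; the UMD would pass it to
		 * its next CS as a dependency instead of the kernel making
		 * every CS wait on vm->last_update. */
		__u32 out_fence_syncobj;
		__u32 pad;
	};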


Regards,

David Zhou


On 2017-09-11 15:53, Christian König wrote:
> [original patch quoted in full - snipped]



* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]     ` <5430f286-3089-1be2-98d3-a0a89eb67aee-5C7GfCeVMHo@public.gmane.org>
@ 2017-09-11  9:04       ` Christian König
       [not found]         ` <ba49e14c-8f05-1f1f-1798-2705237b87b0-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2017-09-11  9:04 UTC (permalink / raw)
  To: zhoucm1, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> NAK. As Roger pointed out, the performance of Vulkan Dota2 drops from
> 72fps to 24fps.
>
> This patch will kill the per-VM-BO advantage; performance even drops to
> 1/3 of the previous non-per-VM-BO path.

This is irrelevant and actually a foreseen consequence of per VM BOs.

If the performance drop is unacceptable then the UMD shouldn't use
this feature.

> All moved and relocated fences have already been synced; we only need
> to catch the VA mapping fence, which the CS itself requires. As I
> proposed in another thread, we may need to expand the VA ioctl to
> return the mapping fence to the UMD, which then passes it to the CS as
> a dependency.

That can be an addition to the existing handling, but first of all the 
current state must be corrected.

There are at least two bug reports on the open driver fixed by this, so 
please review.

Thanks,
Christian.

On 2017-09-11 at 10:59, zhoucm1 wrote:
> [previous message and patch quoted in full - snipped]




* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]         ` <ba49e14c-8f05-1f1f-1798-2705237b87b0-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-11  9:10           ` zhoucm1
       [not found]             ` <7f5bb679-7385-135e-dc10-d40ba391f082-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: zhoucm1 @ 2017-09-11  9:10 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



On 2017-09-11 17:04, Christian König wrote:
>> NAK. As Roger pointed out, the performance of Vulkan Dota2 drops from
>> 72fps to 24fps.
>>
>> This patch will kill the per-VM-BO advantage; performance even drops
>> to 1/3 of the previous non-per-VM-BO path.
>
> This is irrelevant and actually a foreseen consequence of per VM BOs.
>
> If the performance drop is unacceptable then the UMD shouldn't use
> this feature.
I get your meaning: if the UMD doesn't use this feature, then all BOs
should be kernel only, so this change should be limited to kernel VM
BOs and not include UMD BOs.
With this limitation, I think the change can still fix your issue.

Regards,
David Zhou
> [remainder of the previous message and patch quoted in full - snipped]



* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]             ` <7f5bb679-7385-135e-dc10-d40ba391f082-5C7GfCeVMHo@public.gmane.org>
@ 2017-09-11 10:53               ` Christian König
       [not found]                 ` <98b9088c-c2e1-8f4e-552b-d6db06ca0047-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2017-09-11 10:53 UTC (permalink / raw)
  To: zhoucm1, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2017-09-11 at 11:10, zhoucm1 wrote:
>
>
> On 2017-09-11 17:04, Christian König wrote:
>>> NAK. As Roger pointed out, the performance of Vulkan Dota2 drops from
>>> 72fps to 24fps.
>>>
>>> This patch will kill the per-VM-BO advantage; performance even drops
>>> to 1/3 of the previous non-per-VM-BO path.
>>
>> This is irrelevant and actually a foreseen consequence of per VM BOs.
>>
>> If the performance drop is unacceptable then the UMD shouldn't use
>> this feature.
> I get your meaning: if the UMD doesn't use this feature, then all BOs
> should be kernel only, so this change should be limited to kernel VM
> BOs and not include UMD BOs.
> With this limitation, I think the change can still fix your issue.

Correct, yes. Can I get your rb or ab now? We need to get this fixed asap.

Thanks,
Christian.

> [remainder of the previous messages and patch quoted in full - snipped]




* RE: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]                 ` <98b9088c-c2e1-8f4e-552b-d6db06ca0047-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-11 10:57                   ` He, Roger
       [not found]                     ` <MWHPR1201MB0127808E4B24F827F3B82019FD680-3iK1xFAIwjq9imrIu4W8xGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: He, Roger @ 2017-09-11 10:57 UTC (permalink / raw)
  To: Christian König, Zhou, David(ChunMing),
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Reviewed-by: Roger He <Hongbo.He@amd.com>

Thanks
Roger(Hongbo.He)
-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Christian König
Sent: Monday, September 11, 2017 6:53 PM
To: Zhou, David(ChunMing) <David1.Zhou@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2

[message being reviewed and the earlier thread quoted in full - snipped]


* RE: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]                     ` <MWHPR1201MB0127808E4B24F827F3B82019FD680-3iK1xFAIwjq9imrIu4W8xGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-09-11 11:42                       ` Zhou, David(ChunMing)
       [not found]                         ` <MWHPR1201MB0206240AA6FA420B2EDA5677B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-11 11:42 UTC (permalink / raw)
  To: He, Roger; +Cc: Zhou, David(ChunMing), Christian König, amd-gfx



My point is that you should add a condition in the code to check whether the BO is a kernel BO or not. I expect a v3.

Regards,
David


Sent from 坚果 Pro

He, Roger <Hongbo.He@amd.com> wrote on 2017-09-11 at 6:57 PM:

[Roger's Reviewed-by and the earlier thread quoted in full - snipped]



* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]                         ` <MWHPR1201MB0206240AA6FA420B2EDA5677B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-09-11 12:00                           ` Christian König
       [not found]                             ` <0566390d-8378-f214-ef64-30b1784f454a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Christian König @ 2017-09-11 12:00 UTC (permalink / raw)
  To: Zhou, David(ChunMing), He, Roger; +Cc: amd-gfx



Ok, then we have a misunderstanding here. This applies to all per-VM
BOs, not just kernel BOs.

It's just that as long as the per-VM BOs aren't used, the handling is
still correct and there are no updates to UMD BOs that we implicitly
wait on.
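
(For reference, the per-VM BO case in question is what the new hunk in
amdgpu_vm_bo_update above keys on: a BO that shares the reservation
object of the root page directory counts as always valid, so its
page-table update fence is remembered in vm->last_update, which
amdgpu_bo_vm_update_pte now adds to every job's sync. The snippet below
only restates that hunk with comments; the names are taken from the
diff.)

	/* In amdgpu_vm_bo_update(), after the mappings were updated: */
	if (bo_va->base.bo &&
	    bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		/* Per-VM BO: it shares the root PD's reservation object,
		 * so keep its page-table update fence as the VM-wide
		 * vm->last_update that every CS then waits on. */
		dma_fence_put(vm->last_update);
		vm->last_update = dma_fence_get(bo_va->last_pt_update);
	}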

Sorry, I should have waited a bit longer, but since it is a rather
urgent bug fix I've already pushed it with Roger's rb.

Regards,
Christian.

On 2017-09-11 at 13:42, Zhou, David(ChunMing) wrote:
> My point is that you should add a condition in the code to check whether the BO is a kernel BO or not. I expect a v3.
>
> Regards,
> David
>
> Sent from 坚果 Pro
>
> [remainder of the thread quoted in full - snipped]





* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]                             ` <0566390d-8378-f214-ef64-30b1784f454a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-09-11 12:30                               ` Zhou, David(ChunMing)
       [not found]                                 ` <MWHPR1201MB0206E76FCF5A0B58F57EFCC2B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  0 siblings, 1 reply; 10+ messages in thread
From: Zhou, David(ChunMing) @ 2017-09-11 12:30 UTC (permalink / raw)
  To: Christian König; +Cc: Zhou, David(ChunMing), He, Roger, amd-gfx



It still doesn't make sense to me. I don't understand why we need every existing update fence for the CS, which could implicitly make the next CS wait on unrelated fences. For the moved-list case, we have already synced; we should explicitly sync only the fences that are needed.

For per-VM BOs, we lack the mapping fence, and that's why the VM fault happens. We need to expand the VA ioctl.

Regards,
David


Sent from 坚果 Pro

Christian König <deathsimple@vodafone.de> wrote on 2017-09-11 at 8:00 PM:

Ok, then we have a misunderstanding here. This applies to all per-VM BOs, not just kernel BOs.

It's just that as long as the per-VM BOs aren't used, the handling is still correct and there are no updates to UMD BOs that we implicitly wait on.

Sorry, I should have waited a bit longer, but since it is a rather urgent bug fix I've already pushed it with Roger's rb.

Regards,
Christian.

Am 11.09.2017 um 13:42 schrieb Zhou, David(ChunMing):
My mean is you should add a condition in code to check if BO is kernel BO or not. I expect V3.

Regards,
David


发自坚果 Pro

He, Roger <Hongbo.He@amd.com><mailto:Hongbo.He@amd.com> 于 2017年9月11日 下午6:57写道:

Reviewed-by: Roger He <Hongbo.He@amd.com><mailto:Hongbo.He@amd.com>

Thanks
Roger(Hongbo.He)
-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Christian K?nig
Sent: Monday, September 11, 2017 6:53 PM
To: Zhou, David(ChunMing) <David1.Zhou@amd.com><mailto:David1.Zhou@amd.com>; amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org>
Subject: Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2

Am 11.09.2017 um 11:10 schrieb zhoucm1:
>
>
> On 2017年09月11日 17:04, Christian König wrote:
>>> NAK, as Roger pointed, the performance of Vulkan Dota2 drops from
>>> 72fps to 24fps.
>>>
>>> This patches will kill Per-vm-BO advantages, even drop to 1/3 of
>>> previous non-per-vm-bo.
>>
>> This is irrelevant and actually a foreseen consequence of per VM BOs.
>>
>> If the performance drops is in-acceptable then the UMD shouldn't use
>> this feature.
> I got your mean, if UMD doesn't use this feature, then all BOs should
> be kernel only, then this change should only be limited to kernel VM
> BOs, not include UMD BOs.
> With this limitation, I think the change also can fix your issue.

Correct, yes. Can I get your rb or ab now? We need to get this fixed asap.

Thanks,
Christian.

>
> Regards,
> David Zhou
>>
>>> all moved and relocated fence have already synced, we just need to
>>> catch the va mapping but which is CS itself required, as my proposal
>>> in another thread, we maybe need to expand va_ioctl to return
>>> mapping fence to UMD, then UMD passes it to CS as dependency.
>>
>> That can be an addition to the existing handling, but first of all
>> the current state must be corrected.
>>
>> There are at least two bug reports on the open driver fixed by this,
>> so please review.
>>
>> Thanks,
>> Christian.
>>
>> On 11.09.2017 at 10:59, zhoucm1 wrote:
>>> NAK, as Roger pointed, the performance of Vulkan Dota2 drops from
>>> 72fps to 24fps.
>>>
>>> This patches will kill Per-vm-BO advantages, even drop to 1/3 of
>>> previous non-per-vm-bo.
>>>
>>> all moved and relocated fence have already synced, we just need to
>>> catch the va mapping but which is CS itself required, as my proposal
>>> in another thread, we maybe need to expand va_ioctl to return
>>> mapping fence to UMD, then UMD passes it to CS as dependency.
>>>
>>>
>>> Regards,
>>>
>>> David Zhou
>>>
>>>
>>> On 2017-09-11 15:53, Christian König wrote:
>>>> From: Christian König <christian.koenig@amd.com>
>>>>
>>>> All users of a VM must always wait for updates with always valid
>>>> BOs to be completed.
>>>>
>>>> v2: remove debugging leftovers, rename struct member
>>>>
>>>> Signed-off-by: Christian König <christian.koenig@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++----
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++-----
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  2 +-
>>>>   3 files changed, 17 insertions(+), 10 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> index 8aa37e0..4681dcc 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>>>> @@ -752,10 +752,6 @@ static int amdgpu_bo_vm_update_pte(struct
>>>> amdgpu_cs_parser *p)
>>>>       if (r)
>>>>           return r;
>>>>   -    r = amdgpu_sync_fence(adev, &p->job->sync,
>>>> vm->last_dir_update);
>>>> -    if (r)
>>>> -        return r;
>>>> -
>>>>       r = amdgpu_vm_clear_freed(adev, vm, NULL);
>>>>       if (r)
>>>>           return r;
>>>> @@ -810,6 +806,12 @@ static int amdgpu_bo_vm_update_pte(struct
>>>> amdgpu_cs_parser *p)
>>>>       }
>>>>         r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
>>>> +    if (r)
>>>> +        return r;
>>>> +
>>>> +    r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
>>>> +    if (r)
>>>> +        return r;
>>>>         if (amdgpu_vm_debug && p->bo_list) {
>>>>           /* Invalidate all BOs to test for userspace bugs */ diff
>>>> --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> index 55f1ecb..5042f09 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> @@ -1140,9 +1140,8 @@ static int amdgpu_vm_update_level(struct
>>>> amdgpu_device *adev,
>>>>                   goto error_free;
>>>>                 amdgpu_bo_fence(parent->base.bo, fence, true);
>>>> -            dma_fence_put(vm->last_dir_update);
>>>> -            vm->last_dir_update = dma_fence_get(fence);
>>>> -            dma_fence_put(fence);
>>>> +            dma_fence_put(vm->last_update);
>>>> +            vm->last_update = fence;
>>>>           }
>>>>       }
>>>>   @@ -1803,6 +1802,12 @@ int amdgpu_vm_bo_update(struct
>>>> amdgpu_device *adev,
>>>>               trace_amdgpu_vm_bo_mapping(mapping);
>>>>       }
>>>>   +    if (bo_va->base.bo &&
>>>> +        bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
>>>> +        dma_fence_put(vm->last_update);
>>>> +        vm->last_update = dma_fence_get(bo_va->last_pt_update);
>>>> +    }
>>>> +
>>>>       return 0;
>>>>   }
>>>>   @@ -2586,7 +2591,7 @@ int amdgpu_vm_init(struct amdgpu_device
>>>> *adev, struct amdgpu_vm *vm,
>>>>                vm->use_cpu_for_update ? "CPU" : "SDMA");
>>>>       WARN_ONCE((vm->use_cpu_for_update &
>>>> !amdgpu_vm_is_large_bar(adev)),
>>>>             "CPU update of VM recommended only for large BAR
>>>> system\n");
>>>> -    vm->last_dir_update = NULL;
>>>> +    vm->last_update = NULL;
>>>>         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
>>>>               AMDGPU_GEM_CREATE_VRAM_CLEARED; @@ -2692,7 +2697,7 @@
>>>> void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm
>>>> *vm)
>>>>       }
>>>>         amdgpu_vm_free_levels(&vm->root);
>>>> -    dma_fence_put(vm->last_dir_update);
>>>> +    dma_fence_put(vm->last_update);
>>>>       for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
>>>>           amdgpu_vm_free_reserved_vmid(adev, vm, i);
>>>>   }
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> index c1accd1..cb6a622 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> @@ -140,7 +140,7 @@ struct amdgpu_vm {
>>>>         /* contains the page directory */
>>>>       struct amdgpu_vm_pt     root;
>>>> -    struct dma_fence    *last_dir_update;
>>>> +    struct dma_fence    *last_update;
>>>>         /* protecting freed */
>>>>       spinlock_t        freed_lock;
>>>



* Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
       [not found]                                 ` <MWHPR1201MB0206E76FCF5A0B58F57EFCC2B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-09-11 12:49                                   ` Christian König
  0 siblings, 0 replies; 10+ messages in thread
From: Christian König @ 2017-09-11 12:49 UTC (permalink / raw)
  To: Zhou, David(ChunMing); +Cc: He, Roger, amd-gfx



> I don't understand why we need every update fence that already exists for the CS
We don't need every update for the CS; we just need all updates for per VM
BOs, because we don't know which of those BOs are used in the CS.
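
(Illustration only, not code taken from the patch: "per VM BO" here means a BO
that shares the reservation object of the VM's root page directory. That is
exactly the condition the patch checks in amdgpu_vm_bo_update() before folding
bo_va->last_pt_update into vm->last_update; the helper name below is invented.)

static bool bo_is_per_vm(struct amdgpu_vm *vm, struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo *bo = bo_va->base.bo;

	/* Always valid / per VM BOs share the root PD's reservation object. */
	return bo && bo->tbo.resv == vm->root.base.bo->tbo.resv;
}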

That's the fundamental problem with per VM BOs: the kernel doesn't know
what is actually used by this CS and what isn't.

Because of this we have to assume the worst case and sync to all the VA
operations on per VM BOs that are in flight.
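
(For reference, a condensed sketch of the ordering the patch establishes in
amdgpu_bo_vm_update_pte(); error handling, the per-BO update loop and how
adev/vm are obtained from the CS parser are trimmed.)

static int vm_update_pte_sketch(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct amdgpu_cs_parser *p)
{
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	/* ... amdgpu_vm_bo_update() for every BO explicitly listed in this CS ... */

	r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
	if (r)
		return r;

	/*
	 * Per VM BOs are not listed in the CS, so assume the worst case and
	 * wait for the last page table update recorded for the whole VM.
	 */
	return amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
}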

> We need to expand the VA ioctl.
Well, we could expand the VA ioctl to provide better performance, but
that can only be a second step.

First of all we must fix the existing handling to do what is expected here.

Regards,
Christian.

On 11.09.2017 at 14:30, Zhou, David(ChunMing) wrote:
> It still doesn't make sense to me. I don't understand why we need every update fence that already exists for the CS; that could implicitly pull in fences that are unrelated to the next CS. For the moved-list case we have already synced. We should explicitly sync only the fences that are actually needed.
>
> For per VM BOs we lack the mapping fence; that's why the VM faults happen. We need to expand the VA ioctl.
>
> Regards,
> David
>
> Sent from my Smartisan Pro
>
> Christian König <deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org> wrote on September 11, 2017 at 8:00 PM:
>
> Ok then we have a misunderstanding here. This applies to all per VM 
> BOs, not just kernel BOs.
>
> It's just that as long as the per VM BOs aren't used the handling is 
> still correct and we don't have any updates to UMD BOs we implicitly 
> wait on.
>
> Sorry I should have waited a bit more, but since it is a very rather 
> urgent bug fix I've already pushed it with Rogers rb.
>
> Regards,
> Christian.
>
> On 11.09.2017 at 13:42, Zhou, David(ChunMing) wrote:
>> My mean is you should add a condition in code to check if BO is kernel BO or not. I expect V3.
>>
>> Regards,
>> David
>>
>> Sent from my Smartisan Pro
>>
>> He, Roger <Hongbo.He-5C7GfCeVMHo@public.gmane.org> wrote on September 11, 2017 at 6:57 PM:
>>
>> Reviewed-by: Roger He <Hongbo.He-5C7GfCeVMHo@public.gmane.org>
>>
>> Thanks
>> Roger(Hongbo.He)
>> -----Original Message-----
>> From: amd-gfx [mailto:amd-gfx-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org] On 
>> Behalf Of Christian König
>> Sent: Monday, September 11, 2017 6:53 PM
>> To: Zhou, David(ChunMing) <David1.Zhou-5C7GfCeVMHo@public.gmane.org>; 
>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> Subject: Re: [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2
>>
>> On 11.09.2017 at 11:10, zhoucm1 wrote:
>> >
>> >
>> > On 2017-09-11 17:04, Christian König wrote:
>> >>> NAK, as Roger pointed, the performance of Vulkan Dota2 drops from
>> >>> 72fps to 24fps.
>> >>>
>> >>> This patches will kill Per-vm-BO advantages, even drop to 1/3 of
>> >>> previous non-per-vm-bo.
>> >>
>> >> This is irrelevant and actually a foreseen consequence of per VM BOs.
>> >>
>> >> If the performance drops is in-acceptable then the UMD shouldn't use
>> >> this feature.
>> > I got your mean, if UMD doesn't use this feature, then all BOs should
>> > be kernel only, then this change should only be limited to kernel VM
>> > BOs, not include UMD BOs.
>> > With this limitation, I think the change also can fix your issue.
>>
>> Correct, yes. Can I get your rb or ab now? We need to get this fixed 
>> asap.
>>
>> Thanks,
>> Christian.
>>
>> >
>> > Regards,
>> > David Zhou
>> >>
>> >>> all moved and relocated fence have already synced, we just need to
>> >>> catch the va mapping but which is CS itself required, as my proposal
>> >>> in another thread, we maybe need to expand va_ioctl to return
>> >>> mapping fence to UMD, then UMD passes it to CS as dependency.
>> >>
>> >> That can be an addition to the existing handling, but first of all
>> >> the current state must be corrected.
>> >>
>> >> There are at least two bug reports on the open driver fixed by this,
>> >> so please review.
>> >>
>> >> Thanks,
>> >> Christian.
>> >>
>> >> On 11.09.2017 at 10:59, zhoucm1 wrote:
>> >>> NAK, as Roger pointed, the performance of Vulkan Dota2 drops from
>> >>> 72fps to 24fps.
>> >>>
>> >>> This patches will kill Per-vm-BO advantages, even drop to 1/3 of
>> >>> previous non-per-vm-bo.
>> >>>
>> >>> all moved and relocated fence have already synced, we just need to
>> >>> catch the va mapping but which is CS itself required, as my proposal
>> >>> in another thread, we maybe need to expand va_ioctl to return
>> >>> mapping fence to UMD, then UMD passes it to CS as dependency.
>> >>>
>> >>>
>> >>> Regards,
>> >>>
>> >>> David Zhou
>> >>>
>> >>>
>> >>> On 2017-09-11 15:53, Christian König wrote:
>> >>>> From: Christian König <christian.koenig-5C7GfCeVMHo@public.gmane.org>
>> >>>>
>> >>>> All users of a VM must always wait for updates with always valid
>> >>>> BOs to be completed.
>> >>>>
>> >>>> v2: remove debugging leftovers, rename struct member
>> >>>>
>> >>>> Signed-off-by: Christian König <christian.koenig-5C7GfCeVMHo@public.gmane.org>
>> >>>> ---
>> >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++----
>> >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++-----
>> >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  2 +-
>> >>>>   3 files changed, 17 insertions(+), 10 deletions(-)
>> >>>>
>> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> >>>> index 8aa37e0..4681dcc 100644
>> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
>> >>>> @@ -752,10 +752,6 @@ static int amdgpu_bo_vm_update_pte(struct
>> >>>> amdgpu_cs_parser *p)
>> >>>>       if (r)
>> >>>>           return r;
>> >>>>   -    r = amdgpu_sync_fence(adev, &p->job->sync,
>> >>>> vm->last_dir_update);
>> >>>> -    if (r)
>> >>>> -        return r;
>> >>>> -
>> >>>>       r = amdgpu_vm_clear_freed(adev, vm, NULL);
>> >>>>       if (r)
>> >>>>           return r;
>> >>>> @@ -810,6 +806,12 @@ static int amdgpu_bo_vm_update_pte(struct
>> >>>> amdgpu_cs_parser *p)
>> >>>>       }
>> >>>>         r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
>> >>>> +    if (r)
>> >>>> +        return r;
>> >>>> +
>> >>>> +    r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
>> >>>> +    if (r)
>> >>>> +        return r;
>> >>>>         if (amdgpu_vm_debug && p->bo_list) {
>> >>>>           /* Invalidate all BOs to test for userspace bugs */ diff
>> >>>> --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> >>>> index 55f1ecb..5042f09 100644
>> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> >>>> @@ -1140,9 +1140,8 @@ static int amdgpu_vm_update_level(struct
>> >>>> amdgpu_device *adev,
>> >>>>                   goto error_free;
>> >>>> amdgpu_bo_fence(parent->base.bo, fence, true);
>> >>>> - dma_fence_put(vm->last_dir_update);
>> >>>> -            vm->last_dir_update = dma_fence_get(fence);
>> >>>> -            dma_fence_put(fence);
>> >>>> + dma_fence_put(vm->last_update);
>> >>>> +            vm->last_update = fence;
>> >>>>           }
>> >>>>       }
>> >>>>   @@ -1803,6 +1802,12 @@ int amdgpu_vm_bo_update(struct
>> >>>> amdgpu_device *adev,
>> >>>> trace_amdgpu_vm_bo_mapping(mapping);
>> >>>>       }
>> >>>>   +    if (bo_va->base.bo &&
>> >>>> +        bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
>> >>>> + dma_fence_put(vm->last_update);
>> >>>> +        vm->last_update = dma_fence_get(bo_va->last_pt_update);
>> >>>> +    }
>> >>>> +
>> >>>>       return 0;
>> >>>>   }
>> >>>>   @@ -2586,7 +2591,7 @@ int amdgpu_vm_init(struct amdgpu_device
>> >>>> *adev, struct amdgpu_vm *vm,
>> >>>> vm->use_cpu_for_update ? "CPU" : "SDMA");
>> >>>> WARN_ONCE((vm->use_cpu_for_update &
>> >>>> !amdgpu_vm_is_large_bar(adev)),
>> >>>>             "CPU update of VM recommended only for large BAR
>> >>>> system\n");
>> >>>> -    vm->last_dir_update = NULL;
>> >>>> +    vm->last_update = NULL;
>> >>>>         flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
>> >>>> AMDGPU_GEM_CREATE_VRAM_CLEARED; @@ -2692,7 +2697,7 @@
>> >>>> void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm
>> >>>> *vm)
>> >>>>       }
>> >>>> amdgpu_vm_free_levels(&vm->root);
>> >>>> - dma_fence_put(vm->last_dir_update);
>> >>>> +    dma_fence_put(vm->last_update);
>> >>>>       for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
>> >>>> amdgpu_vm_free_reserved_vmid(adev, vm, i);
>> >>>>   }
>> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> >>>> index c1accd1..cb6a622 100644
>> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> >>>> @@ -140,7 +140,7 @@ struct amdgpu_vm {
>> >>>>         /* contains the page directory */
>> >>>>       struct amdgpu_vm_pt     root;
>> >>>> -    struct dma_fence *last_dir_update;
>> >>>> +    struct dma_fence    *last_update;
>> >>>>         /* protecting freed */
>> >>>>       spinlock_t        freed_lock;
>> >>>



end of thread, other threads:[~2017-09-11 12:49 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-11  7:53 [PATCH] drm/amdgpu: fix VM sync with always valid BOs v2 Christian König
     [not found] ` <1505116409-9815-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-11  8:59   ` zhoucm1
     [not found]     ` <5430f286-3089-1be2-98d3-a0a89eb67aee-5C7GfCeVMHo@public.gmane.org>
2017-09-11  9:04       ` Christian König
     [not found]         ` <ba49e14c-8f05-1f1f-1798-2705237b87b0-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-11  9:10           ` zhoucm1
     [not found]             ` <7f5bb679-7385-135e-dc10-d40ba391f082-5C7GfCeVMHo@public.gmane.org>
2017-09-11 10:53               ` Christian König
     [not found]                 ` <98b9088c-c2e1-8f4e-552b-d6db06ca0047-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-11 10:57                   ` He, Roger
     [not found]                     ` <MWHPR1201MB0127808E4B24F827F3B82019FD680-3iK1xFAIwjq9imrIu4W8xGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-09-11 11:42                       ` Zhou, David(ChunMing)
     [not found]                         ` <MWHPR1201MB0206240AA6FA420B2EDA5677B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-09-11 12:00                           ` Christian König
     [not found]                             ` <0566390d-8378-f214-ef64-30b1784f454a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-09-11 12:30                               ` Zhou, David(ChunMing)
     [not found]                                 ` <MWHPR1201MB0206E76FCF5A0B58F57EFCC2B4680-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-09-11 12:49                                   ` Christian König

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.