All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
@ 2020-12-04 20:41 Alex Deucher
  2020-12-09 17:30 ` Alex Deucher
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2020-12-04 20:41 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alex Deucher

And drop it when we detach.  If the shared buffer is in vram,
we need to make sure we don't put the device into runtime
suspend.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 5b465ab774d1..f63f182f37f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
 
 /**
  * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	if (attach->dev->driver == adev->dev->driver)
 		return 0;
 
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (r < 0)
+		goto out;
+
 	r = amdgpu_bo_reserve(bo, false);
 	if (unlikely(r != 0))
-		return r;
+		goto out;
 
 	/*
 	 * We only create shared fences for internal use, but importers
@@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	 */
 	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 	if (r)
-		return r;
+		goto out;
 
 	bo->prime_shared_count++;
 	amdgpu_bo_unreserve(bo);
 	return 0;
+
+out:
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	return r;
 }
 
 /**
@@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
 	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
 		bo->prime_shared_count--;
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 }
 
 /**
-- 
2.25.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-04 20:41 [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer Alex Deucher
@ 2020-12-09 17:30 ` Alex Deucher
  2020-12-10  4:49   ` Shashank Sharma
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2020-12-09 17:30 UTC (permalink / raw)
  To: amd-gfx list; +Cc: Alex Deucher

On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>
> And drop it when we detach.  If the shared buffer is in vram,
> we need to make sure we don't put the device into runtime
> suspend.
>
> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>


Ping?  Any thoughts on this?  We really only need this for p2p since
device memory is involved, but I'm not sure of the best way to handle
that.

Alex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
>  1 file changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 5b465ab774d1..f63f182f37f9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -40,6 +40,7 @@
>  #include <linux/dma-buf.h>
>  #include <linux/dma-fence-array.h>
>  #include <linux/pci-p2pdma.h>
> +#include <linux/pm_runtime.h>
>
>  /**
>   * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>         if (attach->dev->driver == adev->dev->driver)
>                 return 0;
>
> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
> +       if (r < 0)
> +               goto out;
> +
>         r = amdgpu_bo_reserve(bo, false);
>         if (unlikely(r != 0))
> -               return r;
> +               goto out;
>
>         /*
>          * We only create shared fences for internal use, but importers
> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>          */
>         r = __dma_resv_make_exclusive(bo->tbo.base.resv);
>         if (r)
> -               return r;
> +               goto out;
>
>         bo->prime_shared_count++;
>         amdgpu_bo_unreserve(bo);
>         return 0;
> +
> +out:
> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> +       return r;
>  }
>
>  /**
> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
>
>         if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
>                 bo->prime_shared_count--;
> +
> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>  }
>
>  /**
> --
> 2.25.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-09 17:30 ` Alex Deucher
@ 2020-12-10  4:49   ` Shashank Sharma
  2020-12-10 10:28     ` Christian König
  2020-12-10 15:54     ` Alex Deucher
  0 siblings, 2 replies; 8+ messages in thread
From: Shashank Sharma @ 2020-12-10  4:49 UTC (permalink / raw)
  To: amd-gfx


On 09/12/20 11:00 pm, Alex Deucher wrote:
> On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>> And drop it when we detach.  If the shared buffer is in vram,
>> we need to make sure we don't put the device into runtime
>> suspend.
>>
>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>
> Ping?  Any thoughts on this?  We really only need this for p2p since
> device memory in involved, but I'm not sure of the best way to handle
> that.
>
> Alex
>
>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
>>  1 file changed, 14 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> index 5b465ab774d1..f63f182f37f9 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> @@ -40,6 +40,7 @@
>>  #include <linux/dma-buf.h>
>>  #include <linux/dma-fence-array.h>
>>  #include <linux/pci-p2pdma.h>
>> +#include <linux/pm_runtime.h>
>>
>>  /**
>>   * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
>> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>         if (attach->dev->driver == adev->dev->driver)
>>                 return 0;
>>
>> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
>> +       if (r < 0)
>> +               goto out;
>> +
I am a bit skeptical if we should fail the BO reserve if we don't get the sync? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality?
>>         r = amdgpu_bo_reserve(bo, false);
>>         if (unlikely(r != 0))
>> -               return r;
>> +               goto out;
>>
>>         /*
>>          * We only create shared fences for internal use, but importers
>> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>          */
>>         r = __dma_resv_make_exclusive(bo->tbo.base.resv);
>>         if (r)
>> -               return r;
>> +               goto out;
>>
>>         bo->prime_shared_count++;
>>         amdgpu_bo_unreserve(bo);
>>         return 0;
>> +
>> +out:
>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

Why not just pm_runtime_put_sync ? Why autosuspend ?

- Shashank

>> +       return r;
>>  }
>>
>>  /**
>> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
>>
>>         if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
>>                 bo->prime_shared_count--;
>> +
>> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>  }
>>
>>  /**
>> --
>> 2.25.4
>>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7Cd5fccf427c34414ff4e708d89c682898%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637431318483043257%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&amp;sdata=NpCTY%2FVKd%2FBDi1wsFC79qSUTmNHx3nBp0HUj3m0cFeM%3D&amp;reserved=0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-10  4:49   ` Shashank Sharma
@ 2020-12-10 10:28     ` Christian König
  2020-12-10 13:03       ` Shashank Sharma
  2020-12-10 15:54     ` Alex Deucher
  1 sibling, 1 reply; 8+ messages in thread
From: Christian König @ 2020-12-10 10:28 UTC (permalink / raw)
  To: Shashank Sharma, amd-gfx

Am 10.12.20 um 05:49 schrieb Shashank Sharma:
> On 09/12/20 11:00 pm, Alex Deucher wrote:
>> On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>>> And drop it when we detach.  If the shared buffer is in vram,
>>> we need to make sure we don't put the device into runtime
>>> suspend.
>>>
>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>> Ping?  Any thoughts on this?  We really only need this for p2p since
>> device memory in involved, but I'm not sure of the best way to handle
>> that.
>>
>> Alex
>>
>>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
>>>   1 file changed, 14 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>> index 5b465ab774d1..f63f182f37f9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>> @@ -40,6 +40,7 @@
>>>   #include <linux/dma-buf.h>
>>>   #include <linux/dma-fence-array.h>
>>>   #include <linux/pci-p2pdma.h>
>>> +#include <linux/pm_runtime.h>
>>>
>>>   /**
>>>    * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
>>> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>          if (attach->dev->driver == adev->dev->driver)
>>>                  return 0;
>>>
>>> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
>>> +       if (r < 0)
>>> +               goto out;
>>> +
> I am a bit skeptical if we should fail the BO reserve if we don't get the sync ? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality ?

I'm not sure why pm_runtime_get_sync() could fail, but not attaching the 
DMA-buf is certainly the best we could do here in that moment.

Otherwise we could end up in accessing the PCIe BAR with power disabled 
and that's indeed kind of bad.

Christian.

>>>          r = amdgpu_bo_reserve(bo, false);
>>>          if (unlikely(r != 0))
>>> -               return r;
>>> +               goto out;
>>>
>>>          /*
>>>           * We only create shared fences for internal use, but importers
>>> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>           */
>>>          r = __dma_resv_make_exclusive(bo->tbo.base.resv);
>>>          if (r)
>>> -               return r;
>>> +               goto out;
>>>
>>>          bo->prime_shared_count++;
>>>          amdgpu_bo_unreserve(bo);
>>>          return 0;
>>> +
>>> +out:
>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> Why not just pm_runtime_put_sync ? Why autosuspend ?
>
> - Shashank
>
>>> +       return r;
>>>   }
>>>
>>>   /**
>>> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
>>>
>>>          if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
>>>                  bo->prime_shared_count--;
>>> +
>>> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>>   }
>>>
>>>   /**
>>> --
>>> 2.25.4
>>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7Cd5fccf427c34414ff4e708d89c682898%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637431318483043257%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&amp;sdata=NpCTY%2FVKd%2FBDi1wsFC79qSUTmNHx3nBp0HUj3m0cFeM%3D&amp;reserved=0
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-10 10:28     ` Christian König
@ 2020-12-10 13:03       ` Shashank Sharma
  2020-12-10 15:53         ` Alex Deucher
  0 siblings, 1 reply; 8+ messages in thread
From: Shashank Sharma @ 2020-12-10 13:03 UTC (permalink / raw)
  To: christian.koenig, amd-gfx


On 10/12/20 3:58 pm, Christian König wrote:
> Am 10.12.20 um 05:49 schrieb Shashank Sharma:
>> On 09/12/20 11:00 pm, Alex Deucher wrote:
>>> On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>>>> And drop it when we detach.  If the shared buffer is in vram,
>>>> we need to make sure we don't put the device into runtime
>>>> suspend.
>>>>
>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>> Ping?  Any thoughts on this?  We really only need this for p2p since
>>> device memory in involved, but I'm not sure of the best way to handle
>>> that.
>>>
>>> Alex
>>>
>>>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
>>>>   1 file changed, 14 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>> index 5b465ab774d1..f63f182f37f9 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>> @@ -40,6 +40,7 @@
>>>>   #include <linux/dma-buf.h>
>>>>   #include <linux/dma-fence-array.h>
>>>>   #include <linux/pci-p2pdma.h>
>>>> +#include <linux/pm_runtime.h>
>>>>
>>>>   /**
>>>>    * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
>>>> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>>          if (attach->dev->driver == adev->dev->driver)
>>>>                  return 0;
>>>>
>>>> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
>>>> +       if (r < 0)
>>>> +               goto out;
>>>> +
>> I am a bit skeptical if we should fail the BO reserve if we don't get the sync ? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality ?
> I'm not sure why pm_runtime_get_sync() could fail, but not attaching the 
> DMA-buf is certainly the best we could do here in that moment.

Ah, I see. Just curious about, before this patch, when we tried to reserve the BO, without the PM sync, how was that doing OK ?

- Shashank

> Otherwise we could end up in accessing the PCIe BAR with power disabled 
> and that's indeed kind of bad.
>
> Christian.
>
>>>>          r = amdgpu_bo_reserve(bo, false);
>>>>          if (unlikely(r != 0))
>>>> -               return r;
>>>> +               goto out;
>>>>
>>>>          /*
>>>>           * We only create shared fences for internal use, but importers
>>>> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>>           */
>>>>          r = __dma_resv_make_exclusive(bo->tbo.base.resv);
>>>>          if (r)
>>>> -               return r;
>>>> +               goto out;
>>>>
>>>>          bo->prime_shared_count++;
>>>>          amdgpu_bo_unreserve(bo);
>>>>          return 0;
>>>> +
>>>> +out:
>>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>> Why not just pm_runtime_put_sync ? Why autosuspend ?
>>
>> - Shashank
>>
>>>> +       return r;
>>>>   }
>>>>
>>>>   /**
>>>> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
>>>>
>>>>          if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
>>>>                  bo->prime_shared_count--;
>>>> +
>>>> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
>>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>>>   }
>>>>
>>>>   /**
>>>> --
>>>> 2.25.4
>>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C9957c5b838fc49ae225e08d89cf649a1%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C1%7C637431928895902617%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=AMtWNSZRyFZDRHhE7hsIdBWxTHVLGYVOHZOSeRSR07s%3D&amp;reserved=0
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C9957c5b838fc49ae225e08d89cf649a1%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C1%7C637431928895902617%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=AMtWNSZRyFZDRHhE7hsIdBWxTHVLGYVOHZOSeRSR07s%3D&amp;reserved=0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-10 13:03       ` Shashank Sharma
@ 2020-12-10 15:53         ` Alex Deucher
  2020-12-10 17:49           ` Shashank Sharma
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2020-12-10 15:53 UTC (permalink / raw)
  To: Shashank Sharma; +Cc: Christian Koenig, amd-gfx list

On Thu, Dec 10, 2020 at 8:03 AM Shashank Sharma <shashank.sharma@amd.com> wrote:
>
>
> On 10/12/20 3:58 pm, Christian König wrote:
> > Am 10.12.20 um 05:49 schrieb Shashank Sharma:
> >> On 09/12/20 11:00 pm, Alex Deucher wrote:
> >>> On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
> >>>> And drop it when we detach.  If the shared buffer is in vram,
> >>>> we need to make sure we don't put the device into runtime
> >>>> suspend.
> >>>>
> >>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> >>> Ping?  Any thoughts on this?  We really only need this for p2p since
> >>> device memory in involved, but I'm not sure of the best way to handle
> >>> that.
> >>>
> >>> Alex
> >>>
> >>>
> >>>> ---
> >>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
> >>>>   1 file changed, 14 insertions(+), 2 deletions(-)
> >>>>
> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >>>> index 5b465ab774d1..f63f182f37f9 100644
> >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >>>> @@ -40,6 +40,7 @@
> >>>>   #include <linux/dma-buf.h>
> >>>>   #include <linux/dma-fence-array.h>
> >>>>   #include <linux/pci-p2pdma.h>
> >>>> +#include <linux/pm_runtime.h>
> >>>>
> >>>>   /**
> >>>>    * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
> >>>> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
> >>>>          if (attach->dev->driver == adev->dev->driver)
> >>>>                  return 0;
> >>>>
> >>>> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
> >>>> +       if (r < 0)
> >>>> +               goto out;
> >>>> +
> >> I am a bit skeptical if we should fail the BO reserve if we don't get the sync ? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality ?
> > I'm not sure why pm_runtime_get_sync() could fail, but not attaching the
> > DMA-buf is certainly the best we could do here in that moment.
>
> Ah, I see. Just curious about, before this patch, when we tried to reserve the BO, without the PM sync, how was that doing OK ?

p2p is not widely used at this point, so we never really hit it except
for specific testing of the functionality.

Alex

>
> - Shashank
>
> > Otherwise we could end up in accessing the PCIe BAR with power disabled
> > and that's indeed kind of bad.
> >
> > Christian.
> >
> >>>>          r = amdgpu_bo_reserve(bo, false);
> >>>>          if (unlikely(r != 0))
> >>>> -               return r;
> >>>> +               goto out;
> >>>>
> >>>>          /*
> >>>>           * We only create shared fences for internal use, but importers
> >>>> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
> >>>>           */
> >>>>          r = __dma_resv_make_exclusive(bo->tbo.base.resv);
> >>>>          if (r)
> >>>> -               return r;
> >>>> +               goto out;
> >>>>
> >>>>          bo->prime_shared_count++;
> >>>>          amdgpu_bo_unreserve(bo);
> >>>>          return 0;
> >>>> +
> >>>> +out:
> >>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> >> Why not just pm_runtime_put_sync ? Why autosuspend ?
> >>
> >> - Shashank
> >>
> >>>> +       return r;
> >>>>   }
> >>>>
> >>>>   /**
> >>>> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
> >>>>
> >>>>          if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
> >>>>                  bo->prime_shared_count--;
> >>>> +
> >>>> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> >>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> >>>>   }
> >>>>
> >>>>   /**
> >>>> --
> >>>> 2.25.4
> >>>>
> >>> _______________________________________________
> >>> amd-gfx mailing list
> >>> amd-gfx@lists.freedesktop.org
> >>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C9957c5b838fc49ae225e08d89cf649a1%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C1%7C637431928895902617%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=AMtWNSZRyFZDRHhE7hsIdBWxTHVLGYVOHZOSeRSR07s%3D&amp;reserved=0
> >> _______________________________________________
> >> amd-gfx mailing list
> >> amd-gfx@lists.freedesktop.org
> >> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C9957c5b838fc49ae225e08d89cf649a1%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C1%7C637431928895902617%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=AMtWNSZRyFZDRHhE7hsIdBWxTHVLGYVOHZOSeRSR07s%3D&amp;reserved=0
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-10  4:49   ` Shashank Sharma
  2020-12-10 10:28     ` Christian König
@ 2020-12-10 15:54     ` Alex Deucher
  1 sibling, 0 replies; 8+ messages in thread
From: Alex Deucher @ 2020-12-10 15:54 UTC (permalink / raw)
  To: Shashank Sharma; +Cc: amd-gfx list

On Wed, Dec 9, 2020 at 11:49 PM Shashank Sharma <shashank.sharma@amd.com> wrote:
>
>
> On 09/12/20 11:00 pm, Alex Deucher wrote:
> > On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
> >> And drop it when we detach.  If the shared buffer is in vram,
> >> we need to make sure we don't put the device into runtime
> >> suspend.
> >>
> >> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> >
> > Ping?  Any thoughts on this?  We really only need this for p2p since
> > device memory in involved, but I'm not sure of the best way to handle
> > that.
> >
> > Alex
> >
> >
> >> ---
> >>  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
> >>  1 file changed, 14 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> index 5b465ab774d1..f63f182f37f9 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> >> @@ -40,6 +40,7 @@
> >>  #include <linux/dma-buf.h>
> >>  #include <linux/dma-fence-array.h>
> >>  #include <linux/pci-p2pdma.h>
> >> +#include <linux/pm_runtime.h>
> >>
> >>  /**
> >>   * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
> >> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
> >>         if (attach->dev->driver == adev->dev->driver)
> >>                 return 0;
> >>
> >> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
> >> +       if (r < 0)
> >> +               goto out;
> >> +
> I am a bit skeptical if we should fail the BO reserve if we don't get the sync ? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality ?
> >>         r = amdgpu_bo_reserve(bo, false);
> >>         if (unlikely(r != 0))
> >> -               return r;
> >> +               goto out;
> >>
> >>         /*
> >>          * We only create shared fences for internal use, but importers
> >> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
> >>          */
> >>         r = __dma_resv_make_exclusive(bo->tbo.base.resv);
> >>         if (r)
> >> -               return r;
> >> +               goto out;
> >>
> >>         bo->prime_shared_count++;
> >>         amdgpu_bo_unreserve(bo);
> >>         return 0;
> >> +
> >> +out:
> >> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>
> Why not just pm_runtime_put_sync ? Why autosuspend ?

Not sure.  I'm just copying what we do in other cases which happens to
work.  I'm not really a runtime pm expert.

Alex

>
> - Shashank
>
> >> +       return r;
> >>  }
> >>
> >>  /**
> >> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
> >>
> >>         if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
> >>                 bo->prime_shared_count--;
> >> +
> >> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
> >> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
> >>  }
> >>
> >>  /**
> >> --
> >> 2.25.4
> >>
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7Cd5fccf427c34414ff4e708d89c682898%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637431318483043257%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&amp;sdata=NpCTY%2FVKd%2FBDi1wsFC79qSUTmNHx3nBp0HUj3m0cFeM%3D&amp;reserved=0
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer
  2020-12-10 15:53         ` Alex Deucher
@ 2020-12-10 17:49           ` Shashank Sharma
  0 siblings, 0 replies; 8+ messages in thread
From: Shashank Sharma @ 2020-12-10 17:49 UTC (permalink / raw)
  To: Alex Deucher; +Cc: Christian Koenig, amd-gfx list


On 10/12/20 9:23 pm, Alex Deucher wrote:
> On Thu, Dec 10, 2020 at 8:03 AM Shashank Sharma <shashank.sharma@amd.com> wrote:
>>
>> On 10/12/20 3:58 pm, Christian König wrote:
>>> Am 10.12.20 um 05:49 schrieb Shashank Sharma:
>>>> On 09/12/20 11:00 pm, Alex Deucher wrote:
>>>>> On Fri, Dec 4, 2020 at 3:41 PM Alex Deucher <alexdeucher@gmail.com> wrote:
>>>>>> And drop it when we detach.  If the shared buffer is in vram,
>>>>>> we need to make sure we don't put the device into runtime
>>>>>> suspend.
>>>>>>
>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>> Ping?  Any thoughts on this?  We really only need this for p2p since
>>>>> device memory in involved, but I'm not sure of the best way to handle
>>>>> that.
>>>>>
>>>>> Alex
>>>>>> ---
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 16 ++++++++++++++--
>>>>>>   1 file changed, 14 insertions(+), 2 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>>>> index 5b465ab774d1..f63f182f37f9 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>>>>>> @@ -40,6 +40,7 @@
>>>>>>   #include <linux/dma-buf.h>
>>>>>>   #include <linux/dma-fence-array.h>
>>>>>>   #include <linux/pci-p2pdma.h>
>>>>>> +#include <linux/pm_runtime.h>
>>>>>>
>>>>>>   /**
>>>>>>    * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
>>>>>> @@ -187,9 +188,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>>>>          if (attach->dev->driver == adev->dev->driver)
>>>>>>                  return 0;
>>>>>>
>>>>>> +       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
>>>>>> +       if (r < 0)
>>>>>> +               goto out;
>>>>>> +
>>>> I am a bit skeptical if we should fail the BO reserve if we don't get the sync ? I was hoping to continue with it, with a warning maybe, so that it doesn't block the existing functionality ?
>>> I'm not sure why pm_runtime_get_sync() could fail, but not attaching the
>>> DMA-buf is certainly the best we could do here in that moment.
>> Ah, I see. Just curious about, before this patch, when we tried to reserve the BO, without the PM sync, how was that doing OK ?
> p2p is not widely used at this point, so we never really hit is except
> for specific testing of the functionality.
>
> Alex

Got it. Apart from this, looks fine by me.

Acked-by: Shashank Sharma <shashank.sharma@amd.com>

>
>> - Shashank
>>
>>> Otherwise we could end up in accessing the PCIe BAR with power disabled
>>> and that's indeed kind of bad.
>>>
>>> Christian.
>>>
>>>>>>          r = amdgpu_bo_reserve(bo, false);
>>>>>>          if (unlikely(r != 0))
>>>>>> -               return r;
>>>>>> +               goto out;
>>>>>>
>>>>>>          /*
>>>>>>           * We only create shared fences for internal use, but importers
>>>>>> @@ -201,11 +206,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
>>>>>>           */
>>>>>>          r = __dma_resv_make_exclusive(bo->tbo.base.resv);
>>>>>>          if (r)
>>>>>> -               return r;
>>>>>> +               goto out;
>>>>>>
>>>>>>          bo->prime_shared_count++;
>>>>>>          amdgpu_bo_unreserve(bo);
>>>>>>          return 0;
>>>>>> +
>>>>>> +out:
>>>>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>>> Why not just pm_runtime_put_sync ? Why autosuspend ?
>>>>
>>>> - Shashank
>>>>
>>>>>> +       return r;
>>>>>>   }
>>>>>>
>>>>>>   /**
>>>>>> @@ -225,6 +234,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
>>>>>>
>>>>>>          if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
>>>>>>                  bo->prime_shared_count--;
>>>>>> +
>>>>>> +       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
>>>>>> +       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
>>>>>>   }
>>>>>>
>>>>>>   /**
>>>>>> --
>>>>>> 2.25.4
>>>>>>
>>>>> _______________________________________________
>>>>> amd-gfx mailing list
>>>>> amd-gfx@lists.freedesktop.org
>>>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C222e41b3837c4d7f3ec908d89d23ca8b%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637432124337679194%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=bgSvn4XqmoKffyCF1v1ldVptiKMZXfEfkzFnJj3Ca0Y%3D&amp;reserved=0
>>>> _______________________________________________
>>>> amd-gfx mailing list
>>>> amd-gfx@lists.freedesktop.org
>>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C222e41b3837c4d7f3ec908d89d23ca8b%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637432124337679194%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=bgSvn4XqmoKffyCF1v1ldVptiKMZXfEfkzFnJj3Ca0Y%3D&amp;reserved=0
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&amp;data=04%7C01%7Cshashank.sharma%40amd.com%7C222e41b3837c4d7f3ec908d89d23ca8b%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637432124337679194%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&amp;sdata=bgSvn4XqmoKffyCF1v1ldVptiKMZXfEfkzFnJj3Ca0Y%3D&amp;reserved=0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-12-10 17:49 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-04 20:41 [PATCH] drm/amdgpu: take runtime pm reference when we attach a buffer Alex Deucher
2020-12-09 17:30 ` Alex Deucher
2020-12-10  4:49   ` Shashank Sharma
2020-12-10 10:28     ` Christian König
2020-12-10 13:03       ` Shashank Sharma
2020-12-10 15:53         ` Alex Deucher
2020-12-10 17:49           ` Shashank Sharma
2020-12-10 15:54     ` Alex Deucher

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.