From: Nirmoy <nirmodas@amd.com>
To: "Christian König" <christian.koenig@amd.com>,
	"Nirmoy Das" <nirmoy.das@amd.com>,
	amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
Date: Fri, 8 Jan 2021 16:37:30 +0100
Message-ID: <7c8cba66-9846-145f-1e83-8499b8762b72@amd.com>
In-Reply-To: <73fda446-033b-8e59-0c94-e96d52e17d18@amd.com>


On 1/8/21 4:11 PM, Christian König wrote:
> Mhm, looks like that doesn't work on SI.


Thanks, sorry I couldn't verify that :/


Nirmoy


>
> Most likely an alignment issue. Going to investigate.
>
> Regards,
> Christian.
>
> On 08.01.21 at 14:38, Christian König wrote:
>> Ah, right I wanted to test this on SI based hardware.
>>
>> Can take care of that, just give me a few hours.
>>
>> Thanks,
>> Christian.
>>
>> On 08.01.21 at 13:20, Nirmoy wrote:
>>> Ping!
>>>
>>> On 12/18/20 3:40 PM, Nirmoy Das wrote:
>>>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>>>
>>>> v2:
>>>> * do not initialize bo to get hint from compiler for -Wuninitialized
>>>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>>>
>>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>>>>   3 files changed, 45 insertions(+), 75 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> index 8b989670ed66..13450a3df044 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
>>>>       return 0;
>>>>   }
>>>>
>>>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
>>>>                      bool direct, struct dma_fence **fence)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>       unsigned offset_idx = 0;
>>>>       unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>>>
>>>> -    amdgpu_bo_kunmap(bo);
>>>> -    amdgpu_bo_unpin(bo);
>>>> -
>>>> -    if (!ring->adev->uvd.address_64_bit) {
>>>> -        struct ttm_operation_ctx ctx = { true, false };
>>>> -
>>>> -        amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>>> -        amdgpu_uvd_force_into_uvd_segment(bo);
>>>> -        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>>>> -        if (r)
>>>> -            goto err;
>>>> -    }
>>>> -
>>>>       r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
>>>>                        AMDGPU_IB_POOL_DELAYED, &job);
>>>>       if (r)
>>>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>       data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>>>
>>>>       ib = &job->ibs[0];
>>>> -    addr = amdgpu_bo_gpu_offset(bo);
>>>> +    addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>       ib->ptr[0] = data[0];
>>>>       ib->ptr[1] = addr;
>>>>       ib->ptr[2] = data[1];
>>>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>       ib->length_dw = 16;
>>>>
>>>>       if (direct) {
>>>> -        r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>>>> -                            true, false,
>>>> -                            msecs_to_jiffies(10));
>>>> -        if (r == 0)
>>>> -            r = -ETIMEDOUT;
>>>> -        if (r < 0)
>>>> -            goto err_free;
>>>> -
>>>>           r = amdgpu_job_submit_direct(job, ring, &f);
>>>>           if (r)
>>>>               goto err_free;
>>>>       } else {
>>>> -        r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>>>> -                     AMDGPU_SYNC_ALWAYS,
>>>> -                     AMDGPU_FENCE_OWNER_UNDEFINED);
>>>> -        if (r)
>>>> -            goto err_free;
>>>> -
>>>>           r = amdgpu_job_submit(job, &adev->uvd.entity,
>>>>                         AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>>>>           if (r)
>>>>               goto err_free;
>>>>       }
>>>>
>>>> -    amdgpu_bo_fence(bo, f, false);
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>>       if (fence)
>>>>           *fence = dma_fence_get(f);
>>>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>       amdgpu_job_free(job);
>>>>
>>>>   err:
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>       return r;
>>>>   }
>>>>
>>>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>>                     struct dma_fence **fence)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>> -    struct amdgpu_bo *bo = NULL;
>>>> +    struct amdgpu_sa_bo *bo;
>>>>       uint32_t *msg;
>>>>       int r, i;
>>>>
>>>> -    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> -                      AMDGPU_GEM_DOMAIN_VRAM,
>>>> -                      &bo, NULL, (void **)&msg);
>>>> +    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> +                 &bo, 1024, PAGE_SIZE);
>>>> +
>>>>       if (r)
>>>>           return r;
>>>>
>>>> +    msg = amdgpu_sa_bo_cpu_addr(bo);
>>>>       /* stitch together an UVD create msg */
>>>>       msg[0] = cpu_to_le32(0x00000de4);
>>>>       msg[1] = cpu_to_le32(0x00000000);
>>>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>>                      bool direct, struct dma_fence **fence)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>> -    struct amdgpu_bo *bo = NULL;
>>>> +    struct amdgpu_sa_bo *bo;
>>>>       uint32_t *msg;
>>>>       int r, i;
>>>>
>>>> -    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> -                      AMDGPU_GEM_DOMAIN_VRAM,
>>>> -                      &bo, NULL, (void **)&msg);
>>>> +    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> +                 &bo, 1024, PAGE_SIZE);
>>>> +
>>>>       if (r)
>>>>           return r;
>>>>
>>>> +    msg = amdgpu_sa_bo_cpu_addr(bo);
>>>>       /* stitch together an UVD destroy msg */
>>>>       msg[0] = cpu_to_le32(0x00000de4);
>>>>       msg[1] = cpu_to_le32(0x00000002);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> index 0d5284b936e4..bce29d6975d3 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>>>
>>>>   static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>>>>   static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>> -                     struct amdgpu_bo *bo,
>>>> +                     struct amdgpu_sa_bo *bo,
>>>>                        struct dma_fence **fence);
>>>>   static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>>                         bool direct, struct dma_fence **fence);
>>>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
>>>>    * Open up a stream for HW test
>>>>    */
>>>>   static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>> -                     struct amdgpu_bo *bo,
>>>> +                     struct amdgpu_sa_bo *bo,
>>>>                        struct dma_fence **fence)
>>>>   {
>>>>       const unsigned ib_size_dw = 1024;
>>>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>>
>>>>       ib = &job->ibs[0];
>>>>
>>>> -    addr = amdgpu_bo_gpu_offset(bo);
>>>> +    addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>
>>>>       /* stitch together an VCE create msg */
>>>>       ib->length_dw = 0;
>>>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
>>>>   int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>   {
>>>>       struct dma_fence *fence = NULL;
>>>> -    struct amdgpu_bo *bo = NULL;
>>>> +    struct amdgpu_sa_bo *bo = NULL;
>>>> +    struct amdgpu_device *adev = ring->adev;
>>>>       long r;
>>>>
>>>>       /* skip vce ring1/2 ib test for now, since it's not reliable */
>>>>       if (ring != &ring->adev->vce.ring[0])
>>>>           return 0;
>>>>
>>>> -    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>>>> -                      AMDGPU_GEM_DOMAIN_VRAM,
>>>> -                      &bo, NULL, NULL);
>>>> +    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> +                 &bo, 512, PAGE_SIZE);
>>>>       if (r)
>>>>           return r;
>>>>
>>>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>           r = 0;
>>>>
>>>>   error:
>>>> +    amdgpu_sa_bo_free(adev, &bo, fence);
>>>>       dma_fence_put(fence);
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>>       return r;
>>>>   }
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> index 4a77c7424dfc..1e46b2f895ba 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
>>>>   }
>>>>
>>>>   static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>> -                   struct amdgpu_bo *bo,
>>>> +                   struct amdgpu_sa_bo *bo,
>>>>                      struct dma_fence **fence)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>>           goto err;
>>>>
>>>>       ib = &job->ibs[0];
>>>> -    addr = amdgpu_bo_gpu_offset(bo);
>>>> +    addr = amdgpu_sa_bo_gpu_addr(bo);
>>>> +
>>>>       ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>>>>       ib->ptr[1] = addr;
>>>>       ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>>>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>>       if (r)
>>>>           goto err_free;
>>>>
>>>> -    amdgpu_bo_fence(bo, f, false);
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>>       if (fence)
>>>>           *fence = dma_fence_get(f);
>>>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>>       amdgpu_job_free(job);
>>>>
>>>>   err:
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>       return r;
>>>>   }
>>>>
>>>>   static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>> -                     struct amdgpu_bo **bo)
>>>> +                     struct amdgpu_sa_bo **bo)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>>       uint32_t *msg;
>>>>       int r, i;
>>>>
>>>>       *bo = NULL;
>>>> -    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> -                      AMDGPU_GEM_DOMAIN_VRAM,
>>>> -                      bo, NULL, (void **)&msg);
>>>> +
>>>> +    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> +                 bo, 1024, PAGE_SIZE);
>>>> +
>>>>       if (r)
>>>>           return r;
>>>>
>>>> +    msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>> +
>>>>       msg[0] = cpu_to_le32(0x00000028);
>>>>       msg[1] = cpu_to_le32(0x00000038);
>>>>       msg[2] = cpu_to_le32(0x00000001);
>>>> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>>>>   }
>>>>
>>>>   static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>>>> -                      struct amdgpu_bo **bo)
>>>> +                      struct amdgpu_sa_bo **bo)
>>>>   {
>>>>       struct amdgpu_device *adev = ring->adev;
>>>>       uint32_t *msg;
>>>>       int r, i;
>>>>
>>>>       *bo = NULL;
>>>> -    r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> -                      AMDGPU_GEM_DOMAIN_VRAM,
>>>> -                      bo, NULL, (void **)&msg);
>>>> +    r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> +                 bo, 1024, PAGE_SIZE);
>>>> +
>>>>       if (r)
>>>>           return r;
>>>> +    msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>>
>>>>       msg[0] = cpu_to_le32(0x00000028);
>>>>       msg[1] = cpu_to_le32(0x00000018);
>>>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>>>>   int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>   {
>>>>       struct dma_fence *fence = NULL;
>>>> -    struct amdgpu_bo *bo;
>>>> +    struct amdgpu_sa_bo *bo;
>>>>       long r;
>>>>
>>>>       r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>   }
>>>>
>>>>   static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>> -                   struct amdgpu_bo *bo,
>>>> +                   struct amdgpu_sa_bo *bo,
>>>>                      struct dma_fence **fence)
>>>>   {
>>>>       struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>>>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>>           goto err;
>>>>
>>>>       ib = &job->ibs[0];
>>>> -    addr = amdgpu_bo_gpu_offset(bo);
>>>> +    addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>       ib->length_dw = 0;
>>>>
>>>>       ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
>>>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>>       if (r)
>>>>           goto err_free;
>>>>
>>>> -    amdgpu_bo_fence(bo, f, false);
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>>       if (fence)
>>>>           *fence = dma_fence_get(f);
>>>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>>       amdgpu_job_free(job);
>>>>
>>>>   err:
>>>> -    amdgpu_bo_unreserve(bo);
>>>> -    amdgpu_bo_unref(&bo);
>>>> +    amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>       return r;
>>>>   }
>>>>
>>>>   int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>   {
>>>>       struct dma_fence *fence = NULL;
>>>> -    struct amdgpu_bo *bo;
>>>> +    struct amdgpu_sa_bo *bo;
>>>>       long r;
>>>>
>>>>       r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>> -- 
>>>> 2.29.2
>>>>
>>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
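
As a quick orientation for readers who don't know the suballocator API the patch above switches to, the lifecycle it relies on looks roughly like the sketch below. Only calls that actually appear in the diff are used (amdgpu_sa_bo_new, amdgpu_sa_bo_cpu_addr, amdgpu_sa_bo_gpu_addr, amdgpu_sa_bo_free); the function name, the message words and the job_fence parameter are purely illustrative, and real IB construction, submission and error handling are omitted.

/*
 * Illustrative sketch only: allocate a small test message from the
 * direct IB suballocator pool instead of a dedicated, pinned amdgpu_bo.
 * example_sa_bo_msg(), job_fence and the message contents are made up;
 * the four amdgpu_sa_bo_* calls are the ones the patch introduces.
 */
#include <linux/dma-fence.h>
#include "amdgpu.h"

static int example_sa_bo_msg(struct amdgpu_device *adev,
                             struct dma_fence *job_fence)
{
        struct amdgpu_sa_bo *bo;
        uint32_t *msg;
        uint64_t addr;
        int r;

        /* One call replaces amdgpu_bo_create_reserved() + pin + kmap. */
        r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
                             &bo, 1024, PAGE_SIZE);
        if (r)
                return r;

        /* CPU and GPU addresses come straight from the suballocation,
         * so no reserve/kmap/pin or gpu_offset lookup is needed. */
        msg = amdgpu_sa_bo_cpu_addr(bo);
        addr = amdgpu_sa_bo_gpu_addr(bo);

        msg[0] = cpu_to_le32(0x00000de4);               /* dummy payload */
        msg[1] = cpu_to_le32(lower_32_bits(addr));      /* dummy payload */

        /* ... build an IB that reads from addr, submit it, and obtain
         * job_fence from the submission ... */

        /*
         * Give the range back to the pool.  With a fence it is reused
         * only after the GPU signals completion; with NULL (the v2
         * error-path behaviour from the changelog) it is released
         * immediately.
         */
        amdgpu_sa_bo_free(adev, &bo, job_fence);

        return 0;
}

Reusing the DIRECT IB pool also matches how the selftests already allocate their IBs in the hunks above, which is presumably why no separate pool is introduced.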

