* [PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
@ 2020-12-18 13:55 Nirmoy Das
2020-12-18 14:02 ` Christian König
0 siblings, 1 reply; 12+ messages in thread
From: Nirmoy Das @ 2020-12-18 13:55 UTC (permalink / raw)
To: amd-gfx; +Cc: Nirmoy Das, Christian.Koenig
Use amdgpu_sa_bo instead of amdgpu_bo.
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
3 files changed, 45 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..3c723e25dd5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
}
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
@@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
-
- if (!ring->adev->uvd.address_64_bit) {
- struct ttm_operation_ctx ctx = { true, false };
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- goto err;
- }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
ib->ptr[0] = data[0];
ib->ptr[1] = addr;
ib->ptr[2] = data[1];
@@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
- if (r == 0)
- r = -ETIMEDOUT;
- if (r < 0)
- goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
} else {
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r)
- goto err_free;
-
r = amdgpu_job_submit(job, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
return r;
}
@@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e4..bce29d6975d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
@@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
@@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo = NULL;
+ struct amdgpu_device *adev = ring->adev;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 512, PAGE_SIZE);
if (r)
return r;
@@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
+ amdgpu_sa_bo_free(adev, &bo, fence);
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc..ae4a80724ce2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
@@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
+
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
return r;
}
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_sa_bo **bo)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
*bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(*bo);
+
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
@@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_sa_bo **bo)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
*bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(*bo);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
@@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_sa_bo *bo;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
@@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
@@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_sa_bo *bo;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
--
2.29.2
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2020-12-18 13:55 [PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests Nirmoy Das
@ 2020-12-18 14:02 ` Christian König
2020-12-18 14:40 ` [PATCH v2 " Nirmoy Das
0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2020-12-18 14:02 UTC (permalink / raw)
To: Nirmoy Das, amd-gfx
Am 18.12.20 um 14:55 schrieb Nirmoy Das:
> Use amdgpu_sa_bo instead of amdgpu_bo.
>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
> 3 files changed, 45 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 8b989670ed66..3c723e25dd5d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
> return 0;
> }
>
> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> unsigned offset_idx = 0;
> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>
> - amdgpu_bo_kunmap(bo);
> - amdgpu_bo_unpin(bo);
> -
> - if (!ring->adev->uvd.address_64_bit) {
> - struct ttm_operation_ctx ctx = { true, false };
> -
> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
> - amdgpu_uvd_force_into_uvd_segment(bo);
> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> - if (r)
> - goto err;
> - }
> -
> r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
> AMDGPU_IB_POOL_DELAYED, &job);
> if (r)
> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->ptr[0] = data[0];
> ib->ptr[1] = addr;
> ib->ptr[2] = data[1];
> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> ib->length_dw = 16;
>
> if (direct) {
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false,
> - msecs_to_jiffies(10));
> - if (r == 0)
> - r = -ETIMEDOUT;
> - if (r < 0)
> - goto err_free;
> -
> r = amdgpu_job_submit_direct(job, ring, &f);
> if (r)
> goto err_free;
> } else {
> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
> - AMDGPU_SYNC_ALWAYS,
> - AMDGPU_FENCE_OWNER_UNDEFINED);
> - if (r)
> - goto err_free;
> -
> r = amdgpu_job_submit(job, &adev->uvd.entity,
> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
> if (r)
> goto err_free;
> }
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
The fence is undefined here and should probably be replaced with NULL.
Same for other places.
> return r;
> }
>
> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo = NULL;
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD create msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000000);
> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo = NULL;
Please don't set the variable to NULL. The compiler should complain when
we use this uninitialized. Same for other places.
Apart from that looks good to me,
Christian.
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD destroy msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000002);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index 0d5284b936e4..bce29d6975d3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>
> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence);
> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence);
> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
> * Open up a stream for HW test
> */
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> const unsigned ib_size_dw = 1024;
> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>
> ib = &job->ibs[0];
>
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
>
> /* stitch together an VCE create msg */
> ib->length_dw = 0;
> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo = NULL;
> + struct amdgpu_device *adev = ring->adev;
> long r;
>
> /* skip vce ring1/2 ib test for now, since it's not reliable */
> if (ring != &ring->adev->vce.ring[0])
> return 0;
>
> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, NULL);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 512, PAGE_SIZE);
> if (r)
> return r;
>
> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> r = 0;
>
> error:
> + amdgpu_sa_bo_free(adev, &bo, fence);
> dma_fence_put(fence);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> return r;
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 4a77c7424dfc..ae4a80724ce2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
> }
>
> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> +
> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
> ib->ptr[1] = addr;
> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
> return r;
> }
>
> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> +
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
> +
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000038);
> msg[2] = cpu_to_le32(0x00000001);
> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
> }
>
> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000018);
> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> }
>
> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->length_dw = 0;
>
> ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
> return r;
> }
>
> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2020-12-18 14:02 ` Christian König
@ 2020-12-18 14:40 ` Nirmoy Das
2021-01-08 12:20 ` Nirmoy
2021-01-08 15:25 ` Christian König
0 siblings, 2 replies; 12+ messages in thread
From: Nirmoy Das @ 2020-12-18 14:40 UTC (permalink / raw)
To: amd-gfx; +Cc: Nirmoy Das, Christian.Koenig
Use amdgpu_sa_bo instead of amdgpu_bo.
v2:
* do not initialize bo, so that the compiler can emit a -Wuninitialized warning on uninitialized use
* pass a NULL fence to amdgpu_sa_bo_free() when the fence is undefined.
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
3 files changed, 45 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..13450a3df044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
}
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
@@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
-
- if (!ring->adev->uvd.address_64_bit) {
- struct ttm_operation_ctx ctx = { true, false };
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- goto err;
- }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
ib->ptr[0] = data[0];
ib->ptr[1] = addr;
ib->ptr[2] = data[1];
@@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
- if (r == 0)
- r = -ETIMEDOUT;
- if (r < 0)
- goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
} else {
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r)
- goto err_free;
-
r = amdgpu_job_submit(job, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, NULL);
return r;
}
@@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(bo);
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e4..bce29d6975d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
@@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
@@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_sa_bo *bo = NULL;
+ struct amdgpu_device *adev = ring->adev;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ &bo, 512, PAGE_SIZE);
if (r)
return r;
@@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
+ amdgpu_sa_bo_free(adev, &bo, fence);
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc..1e46b2f895ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
@@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
+
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, NULL);
return r;
}
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_sa_bo **bo)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
*bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(*bo);
+
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
@@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_sa_bo **bo)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
*bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
+ bo, 1024, PAGE_SIZE);
+
if (r)
return r;
+ msg = amdgpu_sa_bo_cpu_addr(*bo);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
@@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_sa_bo *bo;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
@@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_sa_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
@@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = amdgpu_sa_bo_gpu_addr(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, f);
if (fence)
*fence = dma_fence_get(f);
@@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
amdgpu_job_free(job);
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_sa_bo_free(adev, &bo, NULL);
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_sa_bo *bo;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
--
2.29.2
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2020-12-18 14:40 ` [PATCH v2 " Nirmoy Das
@ 2021-01-08 12:20 ` Nirmoy
2021-01-08 13:38 ` Christian König
2021-01-08 15:25 ` Christian König
1 sibling, 1 reply; 12+ messages in thread
From: Nirmoy @ 2021-01-08 12:20 UTC (permalink / raw)
To: Nirmoy Das, amd-gfx; +Cc: Christian.Koenig
Ping!
On 12/18/20 3:40 PM, Nirmoy Das wrote:
> Use amdgpu_sa_bo instead of amdgpu_bo.
>
> v2:
> * do not initialize bo to get hint from compiler for -Wuninitialized
> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
> 3 files changed, 45 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 8b989670ed66..13450a3df044 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
> return 0;
> }
>
> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> unsigned offset_idx = 0;
> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>
> - amdgpu_bo_kunmap(bo);
> - amdgpu_bo_unpin(bo);
> -
> - if (!ring->adev->uvd.address_64_bit) {
> - struct ttm_operation_ctx ctx = { true, false };
> -
> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
> - amdgpu_uvd_force_into_uvd_segment(bo);
> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> - if (r)
> - goto err;
> - }
> -
> r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
> AMDGPU_IB_POOL_DELAYED, &job);
> if (r)
> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->ptr[0] = data[0];
> ib->ptr[1] = addr;
> ib->ptr[2] = data[1];
> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> ib->length_dw = 16;
>
> if (direct) {
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false,
> - msecs_to_jiffies(10));
> - if (r == 0)
> - r = -ETIMEDOUT;
> - if (r < 0)
> - goto err_free;
> -
> r = amdgpu_job_submit_direct(job, ring, &f);
> if (r)
> goto err_free;
> } else {
> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
> - AMDGPU_SYNC_ALWAYS,
> - AMDGPU_FENCE_OWNER_UNDEFINED);
> - if (r)
> - goto err_free;
> -
> r = amdgpu_job_submit(job, &adev->uvd.entity,
> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
> if (r)
> goto err_free;
> }
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo;
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD create msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000000);
> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo;
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD destroy msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000002);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index 0d5284b936e4..bce29d6975d3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>
> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence);
> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence);
> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
> * Open up a stream for HW test
> */
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> const unsigned ib_size_dw = 1024;
> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>
> ib = &job->ibs[0];
>
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
>
> /* stitch together an VCE create msg */
> ib->length_dw = 0;
> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo = NULL;
> + struct amdgpu_device *adev = ring->adev;
> long r;
>
> /* skip vce ring1/2 ib test for now, since it's not reliable */
> if (ring != &ring->adev->vce.ring[0])
> return 0;
>
> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, NULL);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 512, PAGE_SIZE);
> if (r)
> return r;
>
> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> r = 0;
>
> error:
> + amdgpu_sa_bo_free(adev, &bo, fence);
> dma_fence_put(fence);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> return r;
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 4a77c7424dfc..1e46b2f895ba 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
> }
>
> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> +
> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
> ib->ptr[1] = addr;
> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> +
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
> +
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000038);
> msg[2] = cpu_to_le32(0x00000001);
> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
> }
>
> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000018);
> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> }
>
> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->length_dw = 0;
>
> ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> --
> 2.29.2
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-08 12:20 ` Nirmoy
@ 2021-01-08 13:38 ` Christian König
2021-01-08 15:11 ` Christian König
0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2021-01-08 13:38 UTC (permalink / raw)
To: Nirmoy, Nirmoy Das, amd-gfx
Ah, right — I wanted to test this on SI-based hardware.
I can take care of that; just give me a few hours.
Thanks,
Christian.
Am 08.01.21 um 13:20 schrieb Nirmoy:
> Ping!
>
> On 12/18/20 3:40 PM, Nirmoy Das wrote:
>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>
>> v2:
>> * do not initialize bo to get hint from compiler for -Wuninitialized
>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index 8b989670ed66..13450a3df044 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>> return 0;
>> }
>>
>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>> amdgpu_bo *bo,
>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>> amdgpu_sa_bo *bo,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> unsigned offset_idx = 0;
>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>
>> - amdgpu_bo_kunmap(bo);
>> - amdgpu_bo_unpin(bo);
>> -
>> - if (!ring->adev->uvd.address_64_bit) {
>> - struct ttm_operation_ctx ctx = { true, false };
>> -
>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>> - amdgpu_uvd_force_into_uvd_segment(bo);
>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>> - if (r)
>> - goto err;
>> - }
>> -
>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>> AMDGPU_IB_POOL_DIRECT :
>> AMDGPU_IB_POOL_DELAYED, &job);
>> if (r)
>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->ptr[0] = data[0];
>> ib->ptr[1] = addr;
>> ib->ptr[2] = data[1];
>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> ib->length_dw = 16;
>>
>> if (direct) {
>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>> - true, false,
>> - msecs_to_jiffies(10));
>> - if (r == 0)
>> - r = -ETIMEDOUT;
>> - if (r < 0)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit_direct(job, ring, &f);
>> if (r)
>> goto err_free;
>> } else {
>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>> - AMDGPU_SYNC_ALWAYS,
>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>> - if (r)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>> if (r)
>> goto err_free;
>> }
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD create msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000000);
>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD destroy msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000002);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> index 0d5284b936e4..bce29d6975d3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>
>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence);
>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> bool direct, struct dma_fence **fence);
>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device
>> *adev, struct drm_file *filp)
>> * Open up a stream for HW test
>> */
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> const unsigned ib_size_dw = 1024;
>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>>
>> ib = &job->ibs[0];
>>
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>
>> /* stitch together an VCE create msg */
>> ib->length_dw = 0;
>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>> amdgpu_ring *ring)
>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo = NULL;
>> + struct amdgpu_device *adev = ring->adev;
>> long r;
>>
>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>> if (ring != &ring->adev->vce.ring[0])
>> return 0;
>>
>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, NULL);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 512, PAGE_SIZE);
>> if (r)
>> return r;
>>
>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
>> *ring, long timeout)
>> r = 0;
>>
>> error:
>> + amdgpu_sa_bo_free(adev, &bo, fence);
>> dma_fence_put(fence);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> return r;
>> }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> index 4a77c7424dfc..1e46b2f895ba 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>> amdgpu_ring *ring)
>> }
>>
>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> +
>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>> ib->ptr[1] = addr;
>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> +
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>> +
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000038);
>> msg[2] = cpu_to_le32(0x00000001);
>> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t hand
>> }
>>
>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000018);
>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
>> amdgpu_ring *ring, uint32_t han
>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>> timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>> amdgpu_ring *ring, long timeout)
>> }
>>
>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->length_dw = 0;
>>
>> ib->ptr[ib->length_dw++] = sizeof(struct
>> amdgpu_vcn_decode_buffer) + 8;
>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>> timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> --
>> 2.29.2
>>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-08 13:38 ` Christian König
@ 2021-01-08 15:11 ` Christian König
2021-01-08 15:37 ` Nirmoy
0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2021-01-08 15:11 UTC (permalink / raw)
To: Nirmoy, Nirmoy Das, amd-gfx
Hmm, it looks like that doesn't work on SI.
Most likely an alignment issue — I'm going to investigate.
Regards,
Christian.
Am 08.01.21 um 14:38 schrieb Christian König:
> Ah, right I wanted to test this on SI based hardware.
>
> Can take care of that, just give me a few hours.
>
> Thanks,
> Christian.
>
> Am 08.01.21 um 13:20 schrieb Nirmoy:
>> Ping!
>>
>> On 12/18/20 3:40 PM, Nirmoy Das wrote:
>>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>>
>>> v2:
>>> * do not initialize bo to get hint from compiler for -Wuninitialized
>>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>>
>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
>>> +++++++------------------
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> index 8b989670ed66..13450a3df044 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>>> return 0;
>>> }
>>>
>>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>> amdgpu_bo *bo,
>>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>> amdgpu_sa_bo *bo,
>>> bool direct, struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> unsigned offset_idx = 0;
>>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>>
>>> - amdgpu_bo_kunmap(bo);
>>> - amdgpu_bo_unpin(bo);
>>> -
>>> - if (!ring->adev->uvd.address_64_bit) {
>>> - struct ttm_operation_ctx ctx = { true, false };
>>> -
>>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>> - amdgpu_uvd_force_into_uvd_segment(bo);
>>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>>> - if (r)
>>> - goto err;
>>> - }
>>> -
>>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>>> AMDGPU_IB_POOL_DIRECT :
>>> AMDGPU_IB_POOL_DELAYED, &job);
>>> if (r)
>>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> ib->ptr[0] = data[0];
>>> ib->ptr[1] = addr;
>>> ib->ptr[2] = data[1];
>>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> ib->length_dw = 16;
>>>
>>> if (direct) {
>>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>>> - true, false,
>>> - msecs_to_jiffies(10));
>>> - if (r == 0)
>>> - r = -ETIMEDOUT;
>>> - if (r < 0)
>>> - goto err_free;
>>> -
>>> r = amdgpu_job_submit_direct(job, ring, &f);
>>> if (r)
>>> goto err_free;
>>> } else {
>>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>>> - AMDGPU_SYNC_ALWAYS,
>>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>>> - if (r)
>>> - goto err_free;
>>> -
>>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>>> if (r)
>>> goto err_free;
>>> }
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>> /* stitch together an UVD create msg */
>>> msg[0] = cpu_to_le32(0x00000de4);
>>> msg[1] = cpu_to_le32(0x00000000);
>>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>> bool direct, struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>> /* stitch together an UVD destroy msg */
>>> msg[0] = cpu_to_le32(0x00000de4);
>>> msg[1] = cpu_to_le32(0x00000002);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> index 0d5284b936e4..bce29d6975d3 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>>
>>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence);
>>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> bool direct, struct dma_fence **fence);
>>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct
>>> amdgpu_device *adev, struct drm_file *filp)
>>> * Open up a stream for HW test
>>> */
>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> const unsigned ib_size_dw = 1024;
>>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>>
>>> ib = &job->ibs[0];
>>>
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>
>>> /* stitch together an VCE create msg */
>>> ib->length_dw = 0;
>>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>>> amdgpu_ring *ring)
>>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo = NULL;
>>> + struct amdgpu_device *adev = ring->adev;
>>> long r;
>>>
>>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>>> if (ring != &ring->adev->vce.ring[0])
>>> return 0;
>>>
>>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, NULL);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 512, PAGE_SIZE);
>>> if (r)
>>> return r;
>>>
>>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
>>> *ring, long timeout)
>>> r = 0;
>>>
>>> error:
>>> + amdgpu_sa_bo_free(adev, &bo, fence);
>>> dma_fence_put(fence);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> return r;
>>> }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> index 4a77c7424dfc..1e46b2f895ba 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>>> amdgpu_ring *ring)
>>> }
>>>
>>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> goto err;
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> +
>>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>>> ib->ptr[1] = addr;
>>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> if (r)
>>> goto err_free;
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo **bo)
>>> + struct amdgpu_sa_bo **bo)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> *bo = NULL;
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - bo, NULL, (void **)&msg);
>>> +
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>> +
>>> msg[0] = cpu_to_le32(0x00000028);
>>> msg[1] = cpu_to_le32(0x00000038);
>>> msg[2] = cpu_to_le32(0x00000001);
>>> @@ -575,18 +576,19 @@ static int
>>> amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>>> }
>>>
>>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring
>>> *ring, uint32_t handle,
>>> - struct amdgpu_bo **bo)
>>> + struct amdgpu_sa_bo **bo)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> *bo = NULL;
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>
>>> msg[0] = cpu_to_le32(0x00000028);
>>> msg[1] = cpu_to_le32(0x00000018);
>>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
>>> amdgpu_ring *ring, uint32_t han
>>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>>> timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo;
>>> + struct amdgpu_sa_bo *bo;
>>> long r;
>>>
>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>>> amdgpu_ring *ring, long timeout)
>>> }
>>>
>>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> goto err;
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> ib->length_dw = 0;
>>>
>>> ib->ptr[ib->length_dw++] = sizeof(struct
>>> amdgpu_vcn_decode_buffer) + 8;
>>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> if (r)
>>> goto err_free;
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>>> timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo;
>>> + struct amdgpu_sa_bo *bo;
>>> long r;
>>>
>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>> --
>>> 2.29.2
>>>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-08 15:11 ` Christian König
@ 2021-01-08 15:37 ` Nirmoy
0 siblings, 0 replies; 12+ messages in thread
From: Nirmoy @ 2021-01-08 15:37 UTC (permalink / raw)
To: Christian König, Nirmoy Das, amd-gfx
On 1/8/21 4:11 PM, Christian König wrote:
> Mhm, looks like that doesn't work on SI.
Thanks, sorry I couldn't verify that :/
Nirmoy
>
> Most likely an alignment issue. Going to investigate.
>
> Regards,
> Christian.
>
> Am 08.01.21 um 14:38 schrieb Christian König:
>> Ah, right I wanted to test this on SI based hardware.
>>
>> Can take care of that, just give me a few hours.
>>
>> Thanks,
>> Christian.
>>
>> Am 08.01.21 um 13:20 schrieb Nirmoy:
>>> Ping!
>>>
>>> On 12/18/20 3:40 PM, Nirmoy Das wrote:
>>>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>>>
>>>> v2:
>>>> * do not initialize bo to get hint from compiler for -Wuninitialized
>>>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>>>
>>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>>> ---
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
>>>> +++++++------------------
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>>>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> index 8b989670ed66..13450a3df044 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>>>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>>>> return 0;
>>>> }
>>>>
>>>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>>> amdgpu_bo *bo,
>>>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>>> amdgpu_sa_bo *bo,
>>>> bool direct, struct dma_fence **fence)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>> unsigned offset_idx = 0;
>>>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>>>
>>>> - amdgpu_bo_kunmap(bo);
>>>> - amdgpu_bo_unpin(bo);
>>>> -
>>>> - if (!ring->adev->uvd.address_64_bit) {
>>>> - struct ttm_operation_ctx ctx = { true, false };
>>>> -
>>>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>>> - amdgpu_uvd_force_into_uvd_segment(bo);
>>>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>>>> - if (r)
>>>> - goto err;
>>>> - }
>>>> -
>>>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>>>> AMDGPU_IB_POOL_DIRECT :
>>>> AMDGPU_IB_POOL_DELAYED, &job);
>>>> if (r)
>>>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>>>
>>>> ib = &job->ibs[0];
>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>> ib->ptr[0] = data[0];
>>>> ib->ptr[1] = addr;
>>>> ib->ptr[2] = data[1];
>>>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>> ib->length_dw = 16;
>>>>
>>>> if (direct) {
>>>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>>>> - true, false,
>>>> - msecs_to_jiffies(10));
>>>> - if (r == 0)
>>>> - r = -ETIMEDOUT;
>>>> - if (r < 0)
>>>> - goto err_free;
>>>> -
>>>> r = amdgpu_job_submit_direct(job, ring, &f);
>>>> if (r)
>>>> goto err_free;
>>>> } else {
>>>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>>>> - AMDGPU_SYNC_ALWAYS,
>>>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>>>> - if (r)
>>>> - goto err_free;
>>>> -
>>>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>>>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>>>> if (r)
>>>> goto err_free;
>>>> }
>>>>
>>>> - amdgpu_bo_fence(bo, f, false);
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>> if (fence)
>>>> *fence = dma_fence_get(f);
>>>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>> amdgpu_job_free(job);
>>>>
>>>> err:
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>> return r;
>>>> }
>>>>
>>>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>>>> amdgpu_ring *ring, uint32_t handle,
>>>> struct dma_fence **fence)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> - struct amdgpu_bo *bo = NULL;
>>>> + struct amdgpu_sa_bo *bo;
>>>> uint32_t *msg;
>>>> int r, i;
>>>>
>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>> - &bo, NULL, (void **)&msg);
>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> + &bo, 1024, PAGE_SIZE);
>>>> +
>>>> if (r)
>>>> return r;
>>>>
>>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>>> /* stitch together an UVD create msg */
>>>> msg[0] = cpu_to_le32(0x00000de4);
>>>> msg[1] = cpu_to_le32(0x00000000);
>>>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>>>> amdgpu_ring *ring, uint32_t handle,
>>>> bool direct, struct dma_fence **fence)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> - struct amdgpu_bo *bo = NULL;
>>>> + struct amdgpu_sa_bo *bo;
>>>> uint32_t *msg;
>>>> int r, i;
>>>>
>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>> - &bo, NULL, (void **)&msg);
>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> + &bo, 1024, PAGE_SIZE);
>>>> +
>>>> if (r)
>>>> return r;
>>>>
>>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>>> /* stitch together an UVD destroy msg */
>>>> msg[0] = cpu_to_le32(0x00000de4);
>>>> msg[1] = cpu_to_le32(0x00000002);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> index 0d5284b936e4..bce29d6975d3 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>>>
>>>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>>> uint32_t handle,
>>>> - struct amdgpu_bo *bo,
>>>> + struct amdgpu_sa_bo *bo,
>>>> struct dma_fence **fence);
>>>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>>>> uint32_t handle,
>>>> bool direct, struct dma_fence **fence);
>>>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct
>>>> amdgpu_device *adev, struct drm_file *filp)
>>>> * Open up a stream for HW test
>>>> */
>>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>>> uint32_t handle,
>>>> - struct amdgpu_bo *bo,
>>>> + struct amdgpu_sa_bo *bo,
>>>> struct dma_fence **fence)
>>>> {
>>>> const unsigned ib_size_dw = 1024;
>>>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>>>> amdgpu_ring *ring, uint32_t handle,
>>>>
>>>> ib = &job->ibs[0];
>>>>
>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>
>>>> /* stitch together an VCE create msg */
>>>> ib->length_dw = 0;
>>>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>>>> amdgpu_ring *ring)
>>>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>> {
>>>> struct dma_fence *fence = NULL;
>>>> - struct amdgpu_bo *bo = NULL;
>>>> + struct amdgpu_sa_bo *bo = NULL;
>>>> + struct amdgpu_device *adev = ring->adev;
>>>> long r;
>>>>
>>>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>>>> if (ring != &ring->adev->vce.ring[0])
>>>> return 0;
>>>>
>>>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>> - &bo, NULL, NULL);
>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> + &bo, 512, PAGE_SIZE);
>>>> if (r)
>>>> return r;
>>>>
>>>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct
>>>> amdgpu_ring *ring, long timeout)
>>>> r = 0;
>>>>
>>>> error:
>>>> + amdgpu_sa_bo_free(adev, &bo, fence);
>>>> dma_fence_put(fence);
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> return r;
>>>> }
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> index 4a77c7424dfc..1e46b2f895ba 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>>>> amdgpu_ring *ring)
>>>> }
>>>>
>>>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>> - struct amdgpu_bo *bo,
>>>> + struct amdgpu_sa_bo *bo,
>>>> struct dma_fence **fence)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> goto err;
>>>>
>>>> ib = &job->ibs[0];
>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>> +
>>>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>>>> ib->ptr[1] = addr;
>>>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>>>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> if (r)
>>>> goto err_free;
>>>>
>>>> - amdgpu_bo_fence(bo, f, false);
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>> if (fence)
>>>> *fence = dma_fence_get(f);
>>>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> amdgpu_job_free(job);
>>>>
>>>> err:
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>> return r;
>>>> }
>>>>
>>>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring
>>>> *ring, uint32_t handle,
>>>> - struct amdgpu_bo **bo)
>>>> + struct amdgpu_sa_bo **bo)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> uint32_t *msg;
>>>> int r, i;
>>>>
>>>> *bo = NULL;
>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>> - bo, NULL, (void **)&msg);
>>>> +
>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> + bo, 1024, PAGE_SIZE);
>>>> +
>>>> if (r)
>>>> return r;
>>>>
>>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>> +
>>>> msg[0] = cpu_to_le32(0x00000028);
>>>> msg[1] = cpu_to_le32(0x00000038);
>>>> msg[2] = cpu_to_le32(0x00000001);
>>>> @@ -575,18 +576,19 @@ static int
>>>> amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>>>> }
>>>>
>>>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring
>>>> *ring, uint32_t handle,
>>>> - struct amdgpu_bo **bo)
>>>> + struct amdgpu_sa_bo **bo)
>>>> {
>>>> struct amdgpu_device *adev = ring->adev;
>>>> uint32_t *msg;
>>>> int r, i;
>>>>
>>>> *bo = NULL;
>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>> - bo, NULL, (void **)&msg);
>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>> + bo, 1024, PAGE_SIZE);
>>>> +
>>>> if (r)
>>>> return r;
>>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>>
>>>> msg[0] = cpu_to_le32(0x00000028);
>>>> msg[1] = cpu_to_le32(0x00000018);
>>>> @@ -603,7 +605,7 @@ static int
>>>> amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>>>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>>>> timeout)
>>>> {
>>>> struct dma_fence *fence = NULL;
>>>> - struct amdgpu_bo *bo;
>>>> + struct amdgpu_sa_bo *bo;
>>>> long r;
>>>>
>>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>>>> amdgpu_ring *ring, long timeout)
>>>> }
>>>>
>>>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>> - struct amdgpu_bo *bo,
>>>> + struct amdgpu_sa_bo *bo,
>>>> struct dma_fence **fence)
>>>> {
>>>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>>>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> goto err;
>>>>
>>>> ib = &job->ibs[0];
>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>> ib->length_dw = 0;
>>>>
>>>> ib->ptr[ib->length_dw++] = sizeof(struct
>>>> amdgpu_vcn_decode_buffer) + 8;
>>>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> if (r)
>>>> goto err_free;
>>>>
>>>> - amdgpu_bo_fence(bo, f, false);
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>
>>>> if (fence)
>>>> *fence = dma_fence_get(f);
>>>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>> amdgpu_ring *ring,
>>>> amdgpu_job_free(job);
>>>>
>>>> err:
>>>> - amdgpu_bo_unreserve(bo);
>>>> - amdgpu_bo_unref(&bo);
>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>> return r;
>>>> }
>>>>
>>>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>>>> timeout)
>>>> {
>>>> struct dma_fence *fence = NULL;
>>>> - struct amdgpu_bo *bo;
>>>> + struct amdgpu_sa_bo *bo;
>>>> long r;
>>>>
>>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>> --
>>>> 2.29.2
>>>>
>>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2020-12-18 14:40 ` [PATCH v2 " Nirmoy Das
2021-01-08 12:20 ` Nirmoy
@ 2021-01-08 15:25 ` Christian König
2021-01-08 15:42 ` Nirmoy
1 sibling, 1 reply; 12+ messages in thread
From: Christian König @ 2021-01-08 15:25 UTC (permalink / raw)
To: Nirmoy Das, amd-gfx
Am 18.12.20 um 15:40 schrieb Nirmoy Das:
> Use amdgpu_sa_bo instead of amdgpu_bo.
>
> v2:
> * do not initialize bo to get hint from compiler for -Wuninitialized
> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
Found a major bug in this, which is probably the reason why it never
worked before.
See below.
>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
> 3 files changed, 45 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 8b989670ed66..13450a3df044 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
> return 0;
> }
>
> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> unsigned offset_idx = 0;
> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>
> - amdgpu_bo_kunmap(bo);
> - amdgpu_bo_unpin(bo);
> -
> - if (!ring->adev->uvd.address_64_bit) {
> - struct ttm_operation_ctx ctx = { true, false };
> -
> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
> - amdgpu_uvd_force_into_uvd_segment(bo);
> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> - if (r)
> - goto err;
> - }
> -
> r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
> AMDGPU_IB_POOL_DELAYED, &job);
> if (r)
> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->ptr[0] = data[0];
> ib->ptr[1] = addr;
> ib->ptr[2] = data[1];
> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> ib->length_dw = 16;
>
> if (direct) {
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false,
> - msecs_to_jiffies(10));
> - if (r == 0)
> - r = -ETIMEDOUT;
> - if (r < 0)
> - goto err_free;
> -
> r = amdgpu_job_submit_direct(job, ring, &f);
> if (r)
> goto err_free;
> } else {
> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
> - AMDGPU_SYNC_ALWAYS,
> - AMDGPU_FENCE_OWNER_UNDEFINED);
> - if (r)
> - goto err_free;
> -
> r = amdgpu_job_submit(job, &adev->uvd.entity,
> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
> if (r)
> goto err_free;
> }
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo;
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
This 1024 here is wrong. The size is in bytes and not double words. So
this needs to be 4096.
This only worked because amdgpu_bo_create_reserved always allocated a
full page instead of 1024 bytes.
Same problem on all other occasions.
In addition to that, the direct pool should only be used if we submit
directly, e.g. during a GPU reset.
Otherwise we need to use the normal pool.
Regards,
Christian.
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD create msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000000);
> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo;
> uint32_t *msg;
> int r, i;
>
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(bo);
> /* stitch together an UVD destroy msg */
> msg[0] = cpu_to_le32(0x00000de4);
> msg[1] = cpu_to_le32(0x00000002);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index 0d5284b936e4..bce29d6975d3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>
> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence);
> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> bool direct, struct dma_fence **fence);
> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
> * Open up a stream for HW test
> */
> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> const unsigned ib_size_dw = 1024;
> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>
> ib = &job->ibs[0];
>
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
>
> /* stitch together an VCE create msg */
> ib->length_dw = 0;
> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo = NULL;
> + struct amdgpu_sa_bo *bo = NULL;
> + struct amdgpu_device *adev = ring->adev;
> long r;
>
> /* skip vce ring1/2 ib test for now, since it's not reliable */
> if (ring != &ring->adev->vce.ring[0])
> return 0;
>
> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - &bo, NULL, NULL);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + &bo, 512, PAGE_SIZE);
> if (r)
> return r;
>
> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> r = 0;
>
> error:
> + amdgpu_sa_bo_free(adev, &bo, fence);
> dma_fence_put(fence);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> return r;
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 4a77c7424dfc..1e46b2f895ba 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
> }
>
> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_device *adev = ring->adev;
> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> +
> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
> ib->ptr[1] = addr;
> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> +
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
>
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
> +
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000038);
> msg[2] = cpu_to_le32(0x00000001);
> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
> }
>
> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> - struct amdgpu_bo **bo)
> + struct amdgpu_sa_bo **bo)
> {
> struct amdgpu_device *adev = ring->adev;
> uint32_t *msg;
> int r, i;
>
> *bo = NULL;
> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM,
> - bo, NULL, (void **)&msg);
> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> + bo, 1024, PAGE_SIZE);
> +
> if (r)
> return r;
> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>
> msg[0] = cpu_to_le32(0x00000028);
> msg[1] = cpu_to_le32(0x00000018);
> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> }
>
> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> - struct amdgpu_bo *bo,
> + struct amdgpu_sa_bo *bo,
> struct dma_fence **fence)
> {
> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> goto err;
>
> ib = &job->ibs[0];
> - addr = amdgpu_bo_gpu_offset(bo);
> + addr = amdgpu_sa_bo_gpu_addr(bo);
> ib->length_dw = 0;
>
> ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> if (r)
> goto err_free;
>
> - amdgpu_bo_fence(bo, f, false);
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, f);
>
> if (fence)
> *fence = dma_fence_get(f);
> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> amdgpu_job_free(job);
>
> err:
> - amdgpu_bo_unreserve(bo);
> - amdgpu_bo_unref(&bo);
> + amdgpu_sa_bo_free(adev, &bo, NULL);
> return r;
> }
>
> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> {
> struct dma_fence *fence = NULL;
> - struct amdgpu_bo *bo;
> + struct amdgpu_sa_bo *bo;
> long r;
>
> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> --
> 2.29.2
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-08 15:25 ` Christian König
@ 2021-01-08 15:42 ` Nirmoy
2021-01-11 9:25 ` Christian König
0 siblings, 1 reply; 12+ messages in thread
From: Nirmoy @ 2021-01-08 15:42 UTC (permalink / raw)
To: Christian König, Nirmoy Das, amd-gfx
On 1/8/21 4:25 PM, Christian König wrote:
> Am 18.12.20 um 15:40 schrieb Nirmoy Das:
>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>
>> v2:
>> * do not initialize bo to get hint from compiler for -Wuninitialized
>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>
> Found a major bug in this which is probably the reason why that never
> worked before.
>
> See below.
>
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index 8b989670ed66..13450a3df044 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>> return 0;
>> }
>>
>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>> amdgpu_bo *bo,
>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>> amdgpu_sa_bo *bo,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> unsigned offset_idx = 0;
>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>
>> - amdgpu_bo_kunmap(bo);
>> - amdgpu_bo_unpin(bo);
>> -
>> - if (!ring->adev->uvd.address_64_bit) {
>> - struct ttm_operation_ctx ctx = { true, false };
>> -
>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>> - amdgpu_uvd_force_into_uvd_segment(bo);
>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>> - if (r)
>> - goto err;
>> - }
>> -
>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>> AMDGPU_IB_POOL_DIRECT :
>> AMDGPU_IB_POOL_DELAYED, &job);
>> if (r)
>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->ptr[0] = data[0];
>> ib->ptr[1] = addr;
>> ib->ptr[2] = data[1];
>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> ib->length_dw = 16;
>>
>> if (direct) {
>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>> - true, false,
>> - msecs_to_jiffies(10));
>> - if (r == 0)
>> - r = -ETIMEDOUT;
>> - if (r < 0)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit_direct(job, ring, &f);
>> if (r)
>> goto err_free;
>> } else {
>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>> - AMDGPU_SYNC_ALWAYS,
>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>> - if (r)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>> if (r)
>> goto err_free;
>> }
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>
> This 1024 here is wrong. The size is in bytes and not double words. So
> this needs to be 4096.
>
> This only worked because amdgpu_bo_create_reserved always allocated a
> full page instead of 1024 bytes.
>
> Same problem on all other occasions.
I wonder why it worked on newer cards.
>
> Additional to that the direct pool should only be used if we submit
> directly, e.g. during a GPU reset.
>
> Otherwise we need to use the normal pool.
I will resend with the above changes.
Thanks,
Nirmoy
>
> Regards,
> Christian.
>
>
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD create msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000000);
>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD destroy msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000002);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> index 0d5284b936e4..bce29d6975d3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>
>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence);
>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> bool direct, struct dma_fence **fence);
>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device
>> *adev, struct drm_file *filp)
>> * Open up a stream for HW test
>> */
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> const unsigned ib_size_dw = 1024;
>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t handle,
>>
>> ib = &job->ibs[0];
>>
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>
>> /* stitch together an VCE create msg */
>> ib->length_dw = 0;
>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>> amdgpu_ring *ring)
>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo = NULL;
>> + struct amdgpu_device *adev = ring->adev;
>> long r;
>>
>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>> if (ring != &ring->adev->vce.ring[0])
>> return 0;
>>
>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, NULL);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 512, PAGE_SIZE);
>> if (r)
>> return r;
>>
>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
>> *ring, long timeout)
>> r = 0;
>>
>> error:
>> + amdgpu_sa_bo_free(adev, &bo, fence);
>> dma_fence_put(fence);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> return r;
>> }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> index 4a77c7424dfc..1e46b2f895ba 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>> amdgpu_ring *ring)
>> }
>>
>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> +
>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>> ib->ptr[1] = addr;
>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>> amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> +
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>> +
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000038);
>> msg[2] = cpu_to_le32(0x00000001);
>> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct
>> amdgpu_ring *ring, uint32_t hand
>> }
>>
>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring,
>> uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000018);
>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
>> amdgpu_ring *ring, uint32_t han
>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>> timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>> amdgpu_ring *ring, long timeout)
>> }
>>
>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->length_dw = 0;
>>
>> ib->ptr[ib->length_dw++] = sizeof(struct
>> amdgpu_vcn_decode_buffer) + 8;
>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>> amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>> timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> --
>> 2.29.2
>>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-08 15:42 ` Nirmoy
@ 2021-01-11 9:25 ` Christian König
2021-01-11 15:05 ` Alex Deucher
0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2021-01-11 9:25 UTC (permalink / raw)
To: Nirmoy, Christian König, Nirmoy Das, amd-gfx
Am 08.01.21 um 16:42 schrieb Nirmoy:
>
> On 1/8/21 4:25 PM, Christian König wrote:
>> Am 18.12.20 um 15:40 schrieb Nirmoy Das:
>>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>>
>>> v2:
>>> * do not initialize bo to get hint from compiler for -Wuninitialized
>>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>
>> Found a major bug in this which is probably the reason why that never
>> worked before.
>>
>> See below.
>>
>>>
>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
>>> +++++++------------------
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> index 8b989670ed66..13450a3df044 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>>> return 0;
>>> }
>>>
>>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>> amdgpu_bo *bo,
>>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>> amdgpu_sa_bo *bo,
>>> bool direct, struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> unsigned offset_idx = 0;
>>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>>
>>> - amdgpu_bo_kunmap(bo);
>>> - amdgpu_bo_unpin(bo);
>>> -
>>> - if (!ring->adev->uvd.address_64_bit) {
>>> - struct ttm_operation_ctx ctx = { true, false };
>>> -
>>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>> - amdgpu_uvd_force_into_uvd_segment(bo);
>>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>>> - if (r)
>>> - goto err;
>>> - }
>>> -
>>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>>> AMDGPU_IB_POOL_DIRECT :
>>> AMDGPU_IB_POOL_DELAYED, &job);
>>> if (r)
>>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> ib->ptr[0] = data[0];
>>> ib->ptr[1] = addr;
>>> ib->ptr[2] = data[1];
>>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> ib->length_dw = 16;
>>>
>>> if (direct) {
>>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>>> - true, false,
>>> - msecs_to_jiffies(10));
>>> - if (r == 0)
>>> - r = -ETIMEDOUT;
>>> - if (r < 0)
>>> - goto err_free;
>>> -
>>> r = amdgpu_job_submit_direct(job, ring, &f);
>>> if (r)
>>> goto err_free;
>>> } else {
>>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>>> - AMDGPU_SYNC_ALWAYS,
>>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>>> - if (r)
>>> - goto err_free;
>>> -
>>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>>> if (r)
>>> goto err_free;
>>> }
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 1024, PAGE_SIZE);
>>
>> This 1024 here is wrong. The size is in bytes and not double words.
>> So this needs to be 4096.
>>
>> This only worked because amdgpu_bo_create_reserved always allocated a
>> full page instead of 1024 bytes.
>>
>> Same problem on all other occasions.
>
>
> I wonder why it worked on newer cards.
Maybe just a coincidence, but there must be more problems. Even with the
size fixed I get a hang of the IB tests during boot.
Christian.
>
>
>>
>> Additional to that the direct pool should only be used if we submit
>> directly, e.g. during a GPU reset.
>>
>> Otherwise we need to use the normal pool.
>
>
> I will resend with above changes.
>
>
> Thanks,
>
> Nirmoy
>
>
>>
>> Regards,
>> Christian.
>>
>>
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>> /* stitch together an UVD create msg */
>>> msg[0] = cpu_to_le32(0x00000de4);
>>> msg[1] = cpu_to_le32(0x00000000);
>>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>> bool direct, struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>> /* stitch together an UVD destroy msg */
>>> msg[0] = cpu_to_le32(0x00000de4);
>>> msg[1] = cpu_to_le32(0x00000002);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> index 0d5284b936e4..bce29d6975d3 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>>
>>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence);
>>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> bool direct, struct dma_fence **fence);
>>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct
>>> amdgpu_device *adev, struct drm_file *filp)
>>> * Open up a stream for HW test
>>> */
>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> const unsigned ib_size_dw = 1024;
>>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>>> amdgpu_ring *ring, uint32_t handle,
>>>
>>> ib = &job->ibs[0];
>>>
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>
>>> /* stitch together an VCE create msg */
>>> ib->length_dw = 0;
>>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>>> amdgpu_ring *ring)
>>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo = NULL;
>>> + struct amdgpu_sa_bo *bo = NULL;
>>> + struct amdgpu_device *adev = ring->adev;
>>> long r;
>>>
>>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>>> if (ring != &ring->adev->vce.ring[0])
>>> return 0;
>>>
>>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - &bo, NULL, NULL);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + &bo, 512, PAGE_SIZE);
>>> if (r)
>>> return r;
>>>
>>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
>>> *ring, long timeout)
>>> r = 0;
>>>
>>> error:
>>> + amdgpu_sa_bo_free(adev, &bo, fence);
>>> dma_fence_put(fence);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> return r;
>>> }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> index 4a77c7424dfc..1e46b2f895ba 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>>> amdgpu_ring *ring)
>>> }
>>>
>>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> goto err;
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> +
>>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>>> ib->ptr[1] = addr;
>>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> if (r)
>>> goto err_free;
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>>> amdgpu_ring *ring,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
>>> uint32_t handle,
>>> - struct amdgpu_bo **bo)
>>> + struct amdgpu_sa_bo **bo)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> *bo = NULL;
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - bo, NULL, (void **)&msg);
>>> +
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>>
>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>> +
>>> msg[0] = cpu_to_le32(0x00000028);
>>> msg[1] = cpu_to_le32(0x00000038);
>>> msg[2] = cpu_to_le32(0x00000001);
>>> @@ -575,18 +576,19 @@ static int
>>> amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>>> }
>>>
>>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring
>>> *ring, uint32_t handle,
>>> - struct amdgpu_bo **bo)
>>> + struct amdgpu_sa_bo **bo)
>>> {
>>> struct amdgpu_device *adev = ring->adev;
>>> uint32_t *msg;
>>> int r, i;
>>>
>>> *bo = NULL;
>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>> - bo, NULL, (void **)&msg);
>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>> + bo, 1024, PAGE_SIZE);
>>> +
>>> if (r)
>>> return r;
>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>
>>> msg[0] = cpu_to_le32(0x00000028);
>>> msg[1] = cpu_to_le32(0x00000018);
>>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
>>> amdgpu_ring *ring, uint32_t han
>>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>>> timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo;
>>> + struct amdgpu_sa_bo *bo;
>>> long r;
>>>
>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>>> amdgpu_ring *ring, long timeout)
>>> }
>>>
>>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>> - struct amdgpu_bo *bo,
>>> + struct amdgpu_sa_bo *bo,
>>> struct dma_fence **fence)
>>> {
>>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> goto err;
>>>
>>> ib = &job->ibs[0];
>>> - addr = amdgpu_bo_gpu_offset(bo);
>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>> ib->length_dw = 0;
>>>
>>> ib->ptr[ib->length_dw++] = sizeof(struct
>>> amdgpu_vcn_decode_buffer) + 8;
>>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> if (r)
>>> goto err_free;
>>>
>>> - amdgpu_bo_fence(bo, f, false);
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>
>>> if (fence)
>>> *fence = dma_fence_get(f);
>>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>> amdgpu_ring *ring,
>>> amdgpu_job_free(job);
>>>
>>> err:
>>> - amdgpu_bo_unreserve(bo);
>>> - amdgpu_bo_unref(&bo);
>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>> return r;
>>> }
>>>
>>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>>> timeout)
>>> {
>>> struct dma_fence *fence = NULL;
>>> - struct amdgpu_bo *bo;
>>> + struct amdgpu_sa_bo *bo;
>>> long r;
>>>
>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>> --
>>> 2.29.2
>>>
>>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-11 9:25 ` Christian König
@ 2021-01-11 15:05 ` Alex Deucher
2021-01-12 10:55 ` Christian König
0 siblings, 1 reply; 12+ messages in thread
From: Alex Deucher @ 2021-01-11 15:05 UTC (permalink / raw)
To: Christian Koenig; +Cc: Nirmoy, Nirmoy Das, amd-gfx list
On Mon, Jan 11, 2021 at 4:25 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Am 08.01.21 um 16:42 schrieb Nirmoy:
> >
> > On 1/8/21 4:25 PM, Christian König wrote:
> >> Am 18.12.20 um 15:40 schrieb Nirmoy Das:
> >>> Use amdgpu_sa_bo instead of amdgpu_bo.
> >>>
> >>> v2:
> >>> * do not initialize bo to get hint from compiler for -Wuninitialized
> >>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
> >>
> >> Found a major bug in this which is probably the reason why that never
> >> worked before.
> >>
> >> See below.
> >>
> >>>
> >>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
> >>> ---
> >>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
> >>> +++++++------------------
> >>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
> >>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
> >>> 3 files changed, 45 insertions(+), 75 deletions(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> >>> index 8b989670ed66..13450a3df044 100644
> >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> >>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
> >>> amdgpu_cs_parser *parser, uint32_t ib_idx)
> >>> return 0;
> >>> }
> >>>
> >>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
> >>> amdgpu_bo *bo,
> >>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
> >>> amdgpu_sa_bo *bo,
> >>> bool direct, struct dma_fence **fence)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
> >>> amdgpu_ring *ring, struct amdgpu_bo *bo,
> >>> unsigned offset_idx = 0;
> >>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
> >>>
> >>> - amdgpu_bo_kunmap(bo);
> >>> - amdgpu_bo_unpin(bo);
> >>> -
> >>> - if (!ring->adev->uvd.address_64_bit) {
> >>> - struct ttm_operation_ctx ctx = { true, false };
> >>> -
> >>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
> >>> - amdgpu_uvd_force_into_uvd_segment(bo);
> >>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> >>> - if (r)
> >>> - goto err;
> >>> - }
> >>> -
> >>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
> >>> AMDGPU_IB_POOL_DIRECT :
> >>> AMDGPU_IB_POOL_DELAYED, &job);
> >>> if (r)
> >>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
> >>> amdgpu_ring *ring, struct amdgpu_bo *bo,
> >>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
> >>>
> >>> ib = &job->ibs[0];
> >>> - addr = amdgpu_bo_gpu_offset(bo);
> >>> + addr = amdgpu_sa_bo_gpu_addr(bo);
> >>> ib->ptr[0] = data[0];
> >>> ib->ptr[1] = addr;
> >>> ib->ptr[2] = data[1];
> >>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
> >>> amdgpu_ring *ring, struct amdgpu_bo *bo,
> >>> ib->length_dw = 16;
> >>>
> >>> if (direct) {
> >>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> >>> - true, false,
> >>> - msecs_to_jiffies(10));
> >>> - if (r == 0)
> >>> - r = -ETIMEDOUT;
> >>> - if (r < 0)
> >>> - goto err_free;
> >>> -
> >>> r = amdgpu_job_submit_direct(job, ring, &f);
> >>> if (r)
> >>> goto err_free;
> >>> } else {
> >>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
> >>> - AMDGPU_SYNC_ALWAYS,
> >>> - AMDGPU_FENCE_OWNER_UNDEFINED);
> >>> - if (r)
> >>> - goto err_free;
> >>> -
> >>> r = amdgpu_job_submit(job, &adev->uvd.entity,
> >>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
> >>> if (r)
> >>> goto err_free;
> >>> }
> >>>
> >>> - amdgpu_bo_fence(bo, f, false);
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, f);
> >>>
> >>> if (fence)
> >>> *fence = dma_fence_get(f);
> >>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
> >>> amdgpu_ring *ring, struct amdgpu_bo *bo,
> >>> amdgpu_job_free(job);
> >>>
> >>> err:
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, NULL);
> >>> return r;
> >>> }
> >>>
> >>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
> >>> amdgpu_ring *ring, uint32_t handle,
> >>> struct dma_fence **fence)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> - struct amdgpu_bo *bo = NULL;
> >>> + struct amdgpu_sa_bo *bo;
> >>> uint32_t *msg;
> >>> int r, i;
> >>>
> >>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> >>> - AMDGPU_GEM_DOMAIN_VRAM,
> >>> - &bo, NULL, (void **)&msg);
> >>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> >>> + &bo, 1024, PAGE_SIZE);
> >>
> >> This 1024 here is wrong. The size is in bytes and not double words.
> >> So this needs to be 4096.
> >>
> >> This only worked because amdgpu_bo_create_reserved always allocated a
> >> full page instead of 1024 bytes.
> >>
> >> Same problem on all other occasions.
> >
> >
> > I wonder why it worked on newer cards.
>
> Maybe just a coincidence, but there must be more problems. Even with the
> size fixed I get a hang of the IB tests during boot.
>
Maybe related to the recent change to move some of the UVD buffers
from vram to gtt?
Alex
> Christian.
>
> >
> >
> >>
> >> In addition to that, the direct pool should only be used if we submit
> >> directly, e.g. during a GPU reset.
> >>
> >> Otherwise we need to use the normal pool.
> >
> >
> > I will resend with above changes.
> >
> >
> > Thanks,
> >
> > Nirmoy
> >
> >
> >>
> >> Regards,
> >> Christian.
> >>
> >>
> >>> +
> >>> if (r)
> >>> return r;
> >>>
> >>> + msg = amdgpu_sa_bo_cpu_addr(bo);
> >>> /* stitch together an UVD create msg */
> >>> msg[0] = cpu_to_le32(0x00000de4);
> >>> msg[1] = cpu_to_le32(0x00000000);
> >>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
> >>> amdgpu_ring *ring, uint32_t handle,
> >>> bool direct, struct dma_fence **fence)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> - struct amdgpu_bo *bo = NULL;
> >>> + struct amdgpu_sa_bo *bo;
> >>> uint32_t *msg;
> >>> int r, i;
> >>>
> >>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> >>> - AMDGPU_GEM_DOMAIN_VRAM,
> >>> - &bo, NULL, (void **)&msg);
> >>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> >>> + &bo, 1024, PAGE_SIZE);
> >>> +
> >>> if (r)
> >>> return r;
> >>>
> >>> + msg = amdgpu_sa_bo_cpu_addr(bo);
> >>> /* stitch together an UVD destroy msg */
> >>> msg[0] = cpu_to_le32(0x00000de4);
> >>> msg[1] = cpu_to_le32(0x00000002);
> >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> >>> index 0d5284b936e4..bce29d6975d3 100644
> >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> >>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
> >>>
> >>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
> >>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
> >>> uint32_t handle,
> >>> - struct amdgpu_bo *bo,
> >>> + struct amdgpu_sa_bo *bo,
> >>> struct dma_fence **fence);
> >>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
> >>> uint32_t handle,
> >>> bool direct, struct dma_fence **fence);
> >>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct
> >>> amdgpu_device *adev, struct drm_file *filp)
> >>> * Open up a stream for HW test
> >>> */
> >>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
> >>> uint32_t handle,
> >>> - struct amdgpu_bo *bo,
> >>> + struct amdgpu_sa_bo *bo,
> >>> struct dma_fence **fence)
> >>> {
> >>> const unsigned ib_size_dw = 1024;
> >>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
> >>> amdgpu_ring *ring, uint32_t handle,
> >>>
> >>> ib = &job->ibs[0];
> >>>
> >>> - addr = amdgpu_bo_gpu_offset(bo);
> >>> + addr = amdgpu_sa_bo_gpu_addr(bo);
> >>>
> >>> /* stitch together an VCE create msg */
> >>> ib->length_dw = 0;
> >>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
> >>> amdgpu_ring *ring)
> >>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
> >>> {
> >>> struct dma_fence *fence = NULL;
> >>> - struct amdgpu_bo *bo = NULL;
> >>> + struct amdgpu_sa_bo *bo = NULL;
> >>> + struct amdgpu_device *adev = ring->adev;
> >>> long r;
> >>>
> >>> /* skip vce ring1/2 ib test for now, since it's not reliable */
> >>> if (ring != &ring->adev->vce.ring[0])
> >>> return 0;
> >>>
> >>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> >>> - AMDGPU_GEM_DOMAIN_VRAM,
> >>> - &bo, NULL, NULL);
> >>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> >>> + &bo, 512, PAGE_SIZE);
> >>> if (r)
> >>> return r;
> >>>
> >>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
> >>> *ring, long timeout)
> >>> r = 0;
> >>>
> >>> error:
> >>> + amdgpu_sa_bo_free(adev, &bo, fence);
> >>> dma_fence_put(fence);
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> return r;
> >>> }
> >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>> index 4a77c7424dfc..1e46b2f895ba 100644
> >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> >>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
> >>> amdgpu_ring *ring)
> >>> }
> >>>
> >>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> >>> - struct amdgpu_bo *bo,
> >>> + struct amdgpu_sa_bo *bo,
> >>> struct dma_fence **fence)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> goto err;
> >>>
> >>> ib = &job->ibs[0];
> >>> - addr = amdgpu_bo_gpu_offset(bo);
> >>> + addr = amdgpu_sa_bo_gpu_addr(bo);
> >>> +
> >>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
> >>> ib->ptr[1] = addr;
> >>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
> >>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> if (r)
> >>> goto err_free;
> >>>
> >>> - amdgpu_bo_fence(bo, f, false);
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, f);
> >>>
> >>> if (fence)
> >>> *fence = dma_fence_get(f);
> >>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> amdgpu_job_free(job);
> >>>
> >>> err:
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, NULL);
> >>> return r;
> >>> }
> >>>
> >>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
> >>> uint32_t handle,
> >>> - struct amdgpu_bo **bo)
> >>> + struct amdgpu_sa_bo **bo)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> uint32_t *msg;
> >>> int r, i;
> >>>
> >>> *bo = NULL;
> >>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> >>> - AMDGPU_GEM_DOMAIN_VRAM,
> >>> - bo, NULL, (void **)&msg);
> >>> +
> >>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> >>> + bo, 1024, PAGE_SIZE);
> >>> +
> >>> if (r)
> >>> return r;
> >>>
> >>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
> >>> +
> >>> msg[0] = cpu_to_le32(0x00000028);
> >>> msg[1] = cpu_to_le32(0x00000038);
> >>> msg[2] = cpu_to_le32(0x00000001);
> >>> @@ -575,18 +576,19 @@ static int
> >>> amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
> >>> }
> >>>
> >>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring
> >>> *ring, uint32_t handle,
> >>> - struct amdgpu_bo **bo)
> >>> + struct amdgpu_sa_bo **bo)
> >>> {
> >>> struct amdgpu_device *adev = ring->adev;
> >>> uint32_t *msg;
> >>> int r, i;
> >>>
> >>> *bo = NULL;
> >>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> >>> - AMDGPU_GEM_DOMAIN_VRAM,
> >>> - bo, NULL, (void **)&msg);
> >>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
> >>> + bo, 1024, PAGE_SIZE);
> >>> +
> >>> if (r)
> >>> return r;
> >>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
> >>>
> >>> msg[0] = cpu_to_le32(0x00000028);
> >>> msg[1] = cpu_to_le32(0x00000018);
> >>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
> >>> amdgpu_ring *ring, uint32_t han
> >>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
> >>> timeout)
> >>> {
> >>> struct dma_fence *fence = NULL;
> >>> - struct amdgpu_bo *bo;
> >>> + struct amdgpu_sa_bo *bo;
> >>> long r;
> >>>
> >>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> >>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
> >>> amdgpu_ring *ring, long timeout)
> >>> }
> >>>
> >>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
> >>> - struct amdgpu_bo *bo,
> >>> + struct amdgpu_sa_bo *bo,
> >>> struct dma_fence **fence)
> >>> {
> >>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
> >>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> goto err;
> >>>
> >>> ib = &job->ibs[0];
> >>> - addr = amdgpu_bo_gpu_offset(bo);
> >>> + addr = amdgpu_sa_bo_gpu_addr(bo);
> >>> ib->length_dw = 0;
> >>>
> >>> ib->ptr[ib->length_dw++] = sizeof(struct
> >>> amdgpu_vcn_decode_buffer) + 8;
> >>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> if (r)
> >>> goto err_free;
> >>>
> >>> - amdgpu_bo_fence(bo, f, false);
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, f);
> >>>
> >>> if (fence)
> >>> *fence = dma_fence_get(f);
> >>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
> >>> amdgpu_ring *ring,
> >>> amdgpu_job_free(job);
> >>>
> >>> err:
> >>> - amdgpu_bo_unreserve(bo);
> >>> - amdgpu_bo_unref(&bo);
> >>> + amdgpu_sa_bo_free(adev, &bo, NULL);
> >>> return r;
> >>> }
> >>>
> >>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
> >>> timeout)
> >>> {
> >>> struct dma_fence *fence = NULL;
> >>> - struct amdgpu_bo *bo;
> >>> + struct amdgpu_sa_bo *bo;
> >>> long r;
> >>>
> >>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
> >>> --
> >>> 2.29.2
> >>>
> >>
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
2021-01-11 15:05 ` Alex Deucher
@ 2021-01-12 10:55 ` Christian König
0 siblings, 0 replies; 12+ messages in thread
From: Christian König @ 2021-01-12 10:55 UTC (permalink / raw)
To: Alex Deucher; +Cc: Nirmoy, Nirmoy Das, amd-gfx list
Am 11.01.21 um 16:05 schrieb Alex Deucher:
> On Mon, Jan 11, 2021 at 4:25 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> Am 08.01.21 um 16:42 schrieb Nirmoy:
>>> On 1/8/21 4:25 PM, Christian König wrote:
>>>> Am 18.12.20 um 15:40 schrieb Nirmoy Das:
>>>>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>>>>
>>>>> v2:
>>>>> * do not initialize bo to get hint from compiler for -Wuninitialized
>>>>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>>>> Found a major bug in this which is probably the reason why that never
>>>> worked before.
>>>>
>>>> See below.
>>>>
>>>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>>>> ---
>>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
>>>>> +++++++------------------
>>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>>>>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>>> index 8b989670ed66..13450a3df044 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>>>>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct
>>>>> amdgpu_cs_parser *parser, uint32_t ib_idx)
>>>>> return 0;
>>>>> }
>>>>>
>>>>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>>>> amdgpu_bo *bo,
>>>>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct
>>>>> amdgpu_sa_bo *bo,
>>>>> bool direct, struct dma_fence **fence)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct
>>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>> unsigned offset_idx = 0;
>>>>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>>>>
>>>>> - amdgpu_bo_kunmap(bo);
>>>>> - amdgpu_bo_unpin(bo);
>>>>> -
>>>>> - if (!ring->adev->uvd.address_64_bit) {
>>>>> - struct ttm_operation_ctx ctx = { true, false };
>>>>> -
>>>>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>>>>> - amdgpu_uvd_force_into_uvd_segment(bo);
>>>>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>>>>> - if (r)
>>>>> - goto err;
>>>>> - }
>>>>> -
>>>>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
>>>>> AMDGPU_IB_POOL_DIRECT :
>>>>> AMDGPU_IB_POOL_DELAYED, &job);
>>>>> if (r)
>>>>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct
>>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>>>>
>>>>> ib = &job->ibs[0];
>>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>> ib->ptr[0] = data[0];
>>>>> ib->ptr[1] = addr;
>>>>> ib->ptr[2] = data[1];
>>>>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct
>>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>> ib->length_dw = 16;
>>>>>
>>>>> if (direct) {
>>>>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>>>>> - true, false,
>>>>> - msecs_to_jiffies(10));
>>>>> - if (r == 0)
>>>>> - r = -ETIMEDOUT;
>>>>> - if (r < 0)
>>>>> - goto err_free;
>>>>> -
>>>>> r = amdgpu_job_submit_direct(job, ring, &f);
>>>>> if (r)
>>>>> goto err_free;
>>>>> } else {
>>>>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>>>>> - AMDGPU_SYNC_ALWAYS,
>>>>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>>>>> - if (r)
>>>>> - goto err_free;
>>>>> -
>>>>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>>>>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>>>>> if (r)
>>>>> goto err_free;
>>>>> }
>>>>>
>>>>> - amdgpu_bo_fence(bo, f, false);
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>>
>>>>> if (fence)
>>>>> *fence = dma_fence_get(f);
>>>>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct
>>>>> amdgpu_ring *ring, struct amdgpu_bo *bo,
>>>>> amdgpu_job_free(job);
>>>>>
>>>>> err:
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>> return r;
>>>>> }
>>>>>
>>>>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct
>>>>> amdgpu_ring *ring, uint32_t handle,
>>>>> struct dma_fence **fence)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> - struct amdgpu_bo *bo = NULL;
>>>>> + struct amdgpu_sa_bo *bo;
>>>>> uint32_t *msg;
>>>>> int r, i;
>>>>>
>>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>>> - &bo, NULL, (void **)&msg);
>>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>>> + &bo, 1024, PAGE_SIZE);
>>>> This 1024 here is wrong. The size is in bytes and not double words.
>>>> So this needs to be 4096.
>>>>
>>>> This only worked because amdgpu_bo_create_reserved always allocated a
>>>> full page instead of 1024 bytes.
>>>>
>>>> Same problem on all other occasions.
>>>
>>> I wonder why it worked on newer cards.
>> Maybe just a coincidence, but there must be more problems. Even with the
>> size fixed I get a hang of the IB tests during boot.
>>
> Maybe related to the recent change to move some of the UVD buffers
> from vram to gtt?
I thought about that as well, but that didn't seem to cause any problems.
Christian.
>
> Alex
>
>
>> Christian.
>>
>>>
>>>> In addition to that, the direct pool should only be used if we submit
>>>> directly, e.g. during a GPU reset.
>>>>
>>>> Otherwise we need to use the normal pool.
>>>
>>> I will resend with above changes.
>>>
>>>
>>> Thanks,
>>>
>>> Nirmoy
>>>
>>>
>>>> Regards,
>>>> Christian.
>>>>
>>>>
>>>>> +
>>>>> if (r)
>>>>> return r;
>>>>>
>>>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>>>> /* stitch together an UVD create msg */
>>>>> msg[0] = cpu_to_le32(0x00000de4);
>>>>> msg[1] = cpu_to_le32(0x00000000);
>>>>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct
>>>>> amdgpu_ring *ring, uint32_t handle,
>>>>> bool direct, struct dma_fence **fence)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> - struct amdgpu_bo *bo = NULL;
>>>>> + struct amdgpu_sa_bo *bo;
>>>>> uint32_t *msg;
>>>>> int r, i;
>>>>>
>>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>>> - &bo, NULL, (void **)&msg);
>>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>>> + &bo, 1024, PAGE_SIZE);
>>>>> +
>>>>> if (r)
>>>>> return r;
>>>>>
>>>>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>>>>> /* stitch together an UVD destroy msg */
>>>>> msg[0] = cpu_to_le32(0x00000de4);
>>>>> msg[1] = cpu_to_le32(0x00000002);
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>>> index 0d5284b936e4..bce29d6975d3 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>>>>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>>>>
>>>>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>>>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>>>> uint32_t handle,
>>>>> - struct amdgpu_bo *bo,
>>>>> + struct amdgpu_sa_bo *bo,
>>>>> struct dma_fence **fence);
>>>>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring,
>>>>> uint32_t handle,
>>>>> bool direct, struct dma_fence **fence);
>>>>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct
>>>>> amdgpu_device *adev, struct drm_file *filp)
>>>>> * Open up a stream for HW test
>>>>> */
>>>>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring,
>>>>> uint32_t handle,
>>>>> - struct amdgpu_bo *bo,
>>>>> + struct amdgpu_sa_bo *bo,
>>>>> struct dma_fence **fence)
>>>>> {
>>>>> const unsigned ib_size_dw = 1024;
>>>>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct
>>>>> amdgpu_ring *ring, uint32_t handle,
>>>>>
>>>>> ib = &job->ibs[0];
>>>>>
>>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>>
>>>>> /* stitch together an VCE create msg */
>>>>> ib->length_dw = 0;
>>>>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct
>>>>> amdgpu_ring *ring)
>>>>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>>>>> {
>>>>> struct dma_fence *fence = NULL;
>>>>> - struct amdgpu_bo *bo = NULL;
>>>>> + struct amdgpu_sa_bo *bo = NULL;
>>>>> + struct amdgpu_device *adev = ring->adev;
>>>>> long r;
>>>>>
>>>>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>>>>> if (ring != &ring->adev->vce.ring[0])
>>>>> return 0;
>>>>>
>>>>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>>> - &bo, NULL, NULL);
>>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>>> + &bo, 512, PAGE_SIZE);
>>>>> if (r)
>>>>> return r;
>>>>>
>>>>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring
>>>>> *ring, long timeout)
>>>>> r = 0;
>>>>>
>>>>> error:
>>>>> + amdgpu_sa_bo_free(adev, &bo, fence);
>>>>> dma_fence_put(fence);
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> return r;
>>>>> }
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>>> index 4a77c7424dfc..1e46b2f895ba 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>>>>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct
>>>>> amdgpu_ring *ring)
>>>>> }
>>>>>
>>>>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>>>> - struct amdgpu_bo *bo,
>>>>> + struct amdgpu_sa_bo *bo,
>>>>> struct dma_fence **fence)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> goto err;
>>>>>
>>>>> ib = &job->ibs[0];
>>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>> +
>>>>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>>>>> ib->ptr[1] = addr;
>>>>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>>>>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> if (r)
>>>>> goto err_free;
>>>>>
>>>>> - amdgpu_bo_fence(bo, f, false);
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>>
>>>>> if (fence)
>>>>> *fence = dma_fence_get(f);
>>>>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> amdgpu_job_free(job);
>>>>>
>>>>> err:
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>> return r;
>>>>> }
>>>>>
>>>>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring,
>>>>> uint32_t handle,
>>>>> - struct amdgpu_bo **bo)
>>>>> + struct amdgpu_sa_bo **bo)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> uint32_t *msg;
>>>>> int r, i;
>>>>>
>>>>> *bo = NULL;
>>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>>> - bo, NULL, (void **)&msg);
>>>>> +
>>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>>> + bo, 1024, PAGE_SIZE);
>>>>> +
>>>>> if (r)
>>>>> return r;
>>>>>
>>>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>>> +
>>>>> msg[0] = cpu_to_le32(0x00000028);
>>>>> msg[1] = cpu_to_le32(0x00000038);
>>>>> msg[2] = cpu_to_le32(0x00000001);
>>>>> @@ -575,18 +576,19 @@ static int
>>>>> amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>>>>> }
>>>>>
>>>>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring
>>>>> *ring, uint32_t handle,
>>>>> - struct amdgpu_bo **bo)
>>>>> + struct amdgpu_sa_bo **bo)
>>>>> {
>>>>> struct amdgpu_device *adev = ring->adev;
>>>>> uint32_t *msg;
>>>>> int r, i;
>>>>>
>>>>> *bo = NULL;
>>>>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>>>>> - AMDGPU_GEM_DOMAIN_VRAM,
>>>>> - bo, NULL, (void **)&msg);
>>>>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>>>>> + bo, 1024, PAGE_SIZE);
>>>>> +
>>>>> if (r)
>>>>> return r;
>>>>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>>>>
>>>>> msg[0] = cpu_to_le32(0x00000028);
>>>>> msg[1] = cpu_to_le32(0x00000018);
>>>>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct
>>>>> amdgpu_ring *ring, uint32_t han
>>>>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long
>>>>> timeout)
>>>>> {
>>>>> struct dma_fence *fence = NULL;
>>>>> - struct amdgpu_bo *bo;
>>>>> + struct amdgpu_sa_bo *bo;
>>>>> long r;
>>>>>
>>>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct
>>>>> amdgpu_ring *ring, long timeout)
>>>>> }
>>>>>
>>>>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>>>> - struct amdgpu_bo *bo,
>>>>> + struct amdgpu_sa_bo *bo,
>>>>> struct dma_fence **fence)
>>>>> {
>>>>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>>>>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> goto err;
>>>>>
>>>>> ib = &job->ibs[0];
>>>>> - addr = amdgpu_bo_gpu_offset(bo);
>>>>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>>>> ib->length_dw = 0;
>>>>>
>>>>> ib->ptr[ib->length_dw++] = sizeof(struct
>>>>> amdgpu_vcn_decode_buffer) + 8;
>>>>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> if (r)
>>>>> goto err_free;
>>>>>
>>>>> - amdgpu_bo_fence(bo, f, false);
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, f);
>>>>>
>>>>> if (fence)
>>>>> *fence = dma_fence_get(f);
>>>>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct
>>>>> amdgpu_ring *ring,
>>>>> amdgpu_job_free(job);
>>>>>
>>>>> err:
>>>>> - amdgpu_bo_unreserve(bo);
>>>>> - amdgpu_bo_unref(&bo);
>>>>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>>>>> return r;
>>>>> }
>>>>>
>>>>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long
>>>>> timeout)
>>>>> {
>>>>> struct dma_fence *fence = NULL;
>>>>> - struct amdgpu_bo *bo;
>>>>> + struct amdgpu_sa_bo *bo;
>>>>> long r;
>>>>>
>>>>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>>>>> --
>>>>> 2.29.2
>>>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=04%7C01%7Cchristian.koenig%40amd.com%7C9e6ae57d1e15465f331b08d8b6426c2c%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637459743710865658%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=iwF3vHSU%2BdnWQ5H1h98Y2CgcukC1SBi3vFHdoL8YSNE%3D&reserved=0
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfx&data=04%7C01%7Cchristian.koenig%40amd.com%7C9e6ae57d1e15465f331b08d8b6426c2c%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637459743710865658%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=iwF3vHSU%2BdnWQ5H1h98Y2CgcukC1SBi3vFHdoL8YSNE%3D&reserved=0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2021-01-12 10:55 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-18 13:55 [PATCH 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests Nirmoy Das
2020-12-18 14:02 ` Christian König
2020-12-18 14:40 ` [PATCH v2 " Nirmoy Das
2021-01-08 12:20 ` Nirmoy
2021-01-08 13:38 ` Christian König
2021-01-08 15:11 ` Christian König
2021-01-08 15:37 ` Nirmoy
2021-01-08 15:25 ` Christian König
2021-01-08 15:42 ` Nirmoy
2021-01-11 9:25 ` Christian König
2021-01-11 15:05 ` Alex Deucher
2021-01-12 10:55 ` Christian König
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.