* [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2)
@ 2016-04-12 11:46 Christian König
2016-04-12 13:14 ` Emil Velikov
2016-04-12 15:22 ` Alex Deucher
0 siblings, 2 replies; 4+ messages in thread
From: Christian König @ 2016-04-12 11:46 UTC (permalink / raw)
To: dri-devel
From: Arindam Nath <arindam.nath@amd.com>
Change History
--------------
v2:
- Make firmware version check correctly. Firmware
versions >= 1.80 should all support 40 UVD
instances.
- Replace AMDGPU_MAX_UVD_HANDLES with max_handles
variable.
v1:
- The firmware can handle up to 40 UVD sessions.
Signed-off-by: Arindam Nath <arindam.nath@amd.com>
Signed-off-by: Ayyappa Chandolu <ayyappa.chandolu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 +++++---
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 ++++++++++++++++------
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 5 ++--
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 5 ++--
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 7 +++--
.../gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 1 +
6 files changed, 41 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 36afabb..4805e45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1592,16 +1592,19 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
* UVD
*/
-#define AMDGPU_MAX_UVD_HANDLES 10
-#define AMDGPU_UVD_STACK_SIZE (1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES 10
+#define AMDGPU_MAX_UVD_HANDLES 40
+#define AMDGPU_UVD_STACK_SIZE (200*1024)
+#define AMDGPU_UVD_HEAP_SIZE (256*1024)
+#define AMDGPU_UVD_SESSION_SIZE (50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET 256
struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
+ unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80..76ebc10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -151,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
+ /* Set the default UVD handles that the firmware can handle */
+ adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -158,8 +161,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The firmware version which has 40 UVD
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -202,7 +216,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
}
@@ -248,7 +262,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@@ -303,7 +317,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
struct amdgpu_ring *ring = &adev->uvd.ring;
int i, r;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
@@ -563,7 +577,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
amdgpu_bo_kunmap(bo);
/* try to alloc a new handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
@@ -586,7 +600,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return r;
/* validate the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n");
@@ -601,7 +615,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 2:
/* it's a destroy msg, free the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
@@ -1013,7 +1027,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
++handles;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index cb46375..0d6b9e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
addr += size;
- size = AMDGPU_UVD_STACK_SIZE >> 3;
+ size = AMDGPU_UVD_HEAP_SIZE >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
addr += size;
- size = AMDGPU_UVD_HEAP_SIZE >> 3;
+ size = (AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 16476d8..24f03d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -271,12 +271,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
- size = AMDGPU_UVD_STACK_SIZE;
+ size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
- size = AMDGPU_UVD_HEAP_SIZE;
+ size = AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d493791..b06cae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -270,18 +270,21 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
- size = AMDGPU_UVD_STACK_SIZE;
+ size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
- size = AMDGPU_UVD_HEAP_SIZE;
+ size = AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+ WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index b2d4aaf..6f6fb34 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,5 +111,6 @@
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH4 0x3d38
#endif /* UVD_6_0_D_H */
--
1.9.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2)
2016-04-12 11:46 [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2) Christian König
@ 2016-04-12 13:14 ` Emil Velikov
2016-04-12 13:26 ` Christian König
2016-04-12 15:22 ` Alex Deucher
1 sibling, 1 reply; 4+ messages in thread
From: Emil Velikov @ 2016-04-12 13:14 UTC (permalink / raw)
To: Christian König, Arindam Nath; +Cc: ML dri-devel
On 12 April 2016 at 12:46, Christian König <deathsimple@vodafone.de> wrote:
> From: Arindam Nath <arindam.nath@amd.com>
>
> Change History
> --------------
>
> v2:
> - Make firmware version check correctly. Firmware
> versions >= 1.80 should all support 40 UVD
> instances.
Fwiw, this is the type of information that people [unfamiliar with the
firmware] will appreciate in the commit message. Currently it's almost
like a "here's a magic commit"
Can we have something like that for the future, pretty please ?
-Emil
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2)
2016-04-12 13:14 ` Emil Velikov
@ 2016-04-12 13:26 ` Christian König
0 siblings, 0 replies; 4+ messages in thread
From: Christian König @ 2016-04-12 13:26 UTC (permalink / raw)
To: Emil Velikov, Arindam Nath; +Cc: ML dri-devel
Am 12.04.2016 um 15:14 schrieb Emil Velikov:
> On 12 April 2016 at 12:46, Christian König <deathsimple@vodafone.de> wrote:
>> From: Arindam Nath <arindam.nath@amd.com>
>>
>> Change History
>> --------------
>>
>> v2:
>> - Make firmware version check correctly. Firmware
>> versions >= 1.80 should all support 40 UVD
>> instances.
> Fwiw, this is the type of information that people [unfamiliar with the
> firmware] will appreciate in the commit message. Currently it's almost
> like a "here a magic commit"
>
> Can we have something like that for the future, pretty please ?
Well, unfortunately not most of the time. A lot of firmware changes are
usually about top secret stuff where you need explicit approval.
Getting this for each individual firmware change is usually just too much
overhead.
Christian.
>
> -Emil
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2)
2016-04-12 11:46 [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2) Christian König
2016-04-12 13:14 ` Emil Velikov
@ 2016-04-12 15:22 ` Alex Deucher
1 sibling, 0 replies; 4+ messages in thread
From: Alex Deucher @ 2016-04-12 15:22 UTC (permalink / raw)
To: Christian König; +Cc: Maling list - DRI developers
Applied, thanks!
Alex
On Tue, Apr 12, 2016 at 7:46 AM, Christian König
<deathsimple@vodafone.de> wrote:
> From: Arindam Nath <arindam.nath@amd.com>
>
> Change History
> --------------
>
> v2:
> - Make firmware version check correctly. Firmware
> versions >= 1.80 should all support 40 UVD
> instances.
> - Replace AMDGPU_MAX_UVD_HANDLES with max_handles
> variable.
>
> v1:
> - The firmware can handle up to 40 UVD sessions.
>
> Signed-off-by: Arindam Nath <arindam.nath@amd.com>
> Signed-off-by: Ayyappa Chandolu <ayyappa.chandolu@amd.com>
> Reviewed-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 +++++---
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 ++++++++++++++++------
> drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 5 ++--
> drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 5 ++--
> drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 7 +++--
> .../gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 1 +
> 6 files changed, 41 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 36afabb..4805e45 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1592,16 +1592,19 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
> /*
> * UVD
> */
> -#define AMDGPU_MAX_UVD_HANDLES 10
> -#define AMDGPU_UVD_STACK_SIZE (1024*1024)
> -#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
> -#define AMDGPU_UVD_FIRMWARE_OFFSET 256
> +#define AMDGPU_DEFAULT_UVD_HANDLES 10
> +#define AMDGPU_MAX_UVD_HANDLES 40
> +#define AMDGPU_UVD_STACK_SIZE (200*1024)
> +#define AMDGPU_UVD_HEAP_SIZE (256*1024)
> +#define AMDGPU_UVD_SESSION_SIZE (50*1024)
> +#define AMDGPU_UVD_FIRMWARE_OFFSET 256
>
> struct amdgpu_uvd {
> struct amdgpu_bo *vcpu_bo;
> void *cpu_addr;
> uint64_t gpu_addr;
> void *saved_bo;
> + unsigned max_handles;
> atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
> struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
> struct delayed_work idle_work;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 338da80..76ebc10 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -151,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
> return r;
> }
>
> + /* Set the default UVD handles that the firmware can handle */
> + adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
> +
> hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
> family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
> version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
> @@ -158,8 +161,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
> DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
> version_major, version_minor, family_id);
>
> + /*
> + * Limit the number of UVD handles depending on microcode major
> + * and minor versions. The firmware version which has 40 UVD
> + * instances support is 1.80. So all subsequent versions should
> + * also have the same support.
> + */
> + if ((version_major > 0x01) ||
> + ((version_major == 0x01) && (version_minor >= 0x50)))
> + adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
> +
> bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
> - + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
> + + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
> + + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
> r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
> AMDGPU_GEM_DOMAIN_VRAM,
> AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
> @@ -202,7 +216,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
> return r;
> }
>
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
> + for (i = 0; i < adev->uvd.max_handles; ++i) {
> atomic_set(&adev->uvd.handles[i], 0);
> adev->uvd.filp[i] = NULL;
> }
> @@ -248,7 +262,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
> if (adev->uvd.vcpu_bo == NULL)
> return 0;
>
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
> + for (i = 0; i < adev->uvd.max_handles; ++i)
> if (atomic_read(&adev->uvd.handles[i]))
> break;
>
> @@ -303,7 +317,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
> struct amdgpu_ring *ring = &adev->uvd.ring;
> int i, r;
>
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
> + for (i = 0; i < adev->uvd.max_handles; ++i) {
> uint32_t handle = atomic_read(&adev->uvd.handles[i]);
> if (handle != 0 && adev->uvd.filp[i] == filp) {
> struct fence *fence;
> @@ -563,7 +577,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
> amdgpu_bo_kunmap(bo);
>
> /* try to alloc a new handle */
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
> + for (i = 0; i < adev->uvd.max_handles; ++i) {
> if (atomic_read(&adev->uvd.handles[i]) == handle) {
> DRM_ERROR("Handle 0x%x already in use!\n", handle);
> return -EINVAL;
> @@ -586,7 +600,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
> return r;
>
> /* validate the handle */
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
> + for (i = 0; i < adev->uvd.max_handles; ++i) {
> if (atomic_read(&adev->uvd.handles[i]) == handle) {
> if (adev->uvd.filp[i] != ctx->parser->filp) {
> DRM_ERROR("UVD handle collision detected!\n");
> @@ -601,7 +615,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
>
> case 2:
> /* it's a destroy msg, free the handle */
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
> + for (i = 0; i < adev->uvd.max_handles; ++i)
> atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
> amdgpu_bo_kunmap(bo);
> return 0;
> @@ -1013,7 +1027,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>
> fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
>
> - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
> + for (i = 0; i < adev->uvd.max_handles; ++i)
> if (atomic_read(&adev->uvd.handles[i]))
> ++handles;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
> index cb46375..0d6b9e2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
> @@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
> WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
>
> addr += size;
> - size = AMDGPU_UVD_STACK_SIZE >> 3;
> + size = AMDGPU_UVD_HEAP_SIZE >> 3;
> WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
> WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
>
> addr += size;
> - size = AMDGPU_UVD_HEAP_SIZE >> 3;
> + size = (AMDGPU_UVD_STACK_SIZE +
> + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
> WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
> WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
> index 16476d8..24f03d2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
> @@ -271,12 +271,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
> WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
>
> offset += size;
> - size = AMDGPU_UVD_STACK_SIZE;
> + size = AMDGPU_UVD_HEAP_SIZE;
> WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
> WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
>
> offset += size;
> - size = AMDGPU_UVD_HEAP_SIZE;
> + size = AMDGPU_UVD_STACK_SIZE +
> + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
> WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
> WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index d493791..b06cae6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -270,18 +270,21 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
> WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
>
> offset += size;
> - size = AMDGPU_UVD_STACK_SIZE;
> + size = AMDGPU_UVD_HEAP_SIZE;
> WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
> WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
>
> offset += size;
> - size = AMDGPU_UVD_HEAP_SIZE;
> + size = AMDGPU_UVD_STACK_SIZE +
> + (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
> WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
> WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
>
> WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
> WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
> WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
> +
> + WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
> }
>
> static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
> diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> index b2d4aaf..6f6fb34 100644
> --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
> @@ -111,5 +111,6 @@
> #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
> #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
> #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
> +#define mmUVD_GP_SCRATCH4 0x3d38
>
> #endif /* UVD_6_0_D_H */
> --
> 1.9.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2016-04-12 15:22 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-04-12 11:46 [PATCH] drm/amdgpu: handle more than 10 UVD sessions (v2) Christian König
2016-04-12 13:14 ` Emil Velikov
2016-04-12 13:26 ` Christian König
2016-04-12 15:22 ` Alex Deucher
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.