* [PATCH] drm/amdgpu: switch to select_se_sh wrapper for gfx v9_0
@ 2022-10-11 21:28 Alex Deucher
2022-10-27 18:01 ` Alex Deucher
0 siblings, 1 reply; 3+ messages in thread
From: Alex Deucher @ 2022-10-11 21:28 UTC (permalink / raw)
To: amd-gfx; +Cc: Alex Deucher, Le Ma, Hawking Zhang
From: Hawking Zhang <Hawking.Zhang@amd.com>
To allow invoking IP-specific callbacks.
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Le Ma <le.ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
.../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +--
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 28 +++++++++----------
2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 81e3b528bbc9..e92b93557c13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
- gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
@@ -820,7 +820,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
soc15_grbm_select(adev, 0, 0, 0, 0);
unlock_spi_csq_mutexes(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0320be4a5fc6..456c8e189b7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1564,7 +1564,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
mask = 1;
cu_bitmap = 0;
counter = 0;
- gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (cu_info->bitmap[i][j] & mask) {
@@ -1583,7 +1583,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1605,7 +1605,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1654,7 +1654,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -2324,13 +2324,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v9_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -2467,14 +2467,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
udelay(1);
}
if (k == adev->usec_timeout) {
- gfx_v9_0_select_se_sh(adev, 0xffffffff,
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff,
0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
@@ -2483,7 +2483,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -6482,7 +6482,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
}
}
@@ -6544,7 +6544,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0, k);
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
@@ -6559,7 +6559,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_query_utc_edc_status(adev, err_data);
@@ -6963,7 +6963,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
gfx_v9_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
@@ -6996,7 +6996,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
--
2.37.3
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/amdgpu: switch to select_se_sh wrapper for gfx v9_0
2022-10-11 21:28 [PATCH] drm/amdgpu: switch to select_se_sh wrapper for gfx v9_0 Alex Deucher
@ 2022-10-27 18:01 ` Alex Deucher
2022-10-27 18:56 ` Felix Kuehling
0 siblings, 1 reply; 3+ messages in thread
From: Alex Deucher @ 2022-10-27 18:01 UTC (permalink / raw)
To: Alex Deucher; +Cc: Le Ma, amd-gfx, Hawking Zhang
Ping?
On Tue, Oct 11, 2022 at 5:28 PM Alex Deucher <alexander.deucher@amd.com> wrote:
>
> From: Hawking Zhang <Hawking.Zhang@amd.com>
>
> To allow invoking ip specific callbacks
>
> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
> Reviewed-by: Le Ma <le.ma@amd.com>
> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> ---
> .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +--
> drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 28 +++++++++----------
> 2 files changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> index 81e3b528bbc9..e92b93557c13 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
> @@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
> for (se_idx = 0; se_idx < se_cnt; se_idx++) {
> for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
>
> - gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
> queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
>
> /*
> @@ -820,7 +820,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
> }
> }
>
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> soc15_grbm_select(adev, 0, 0, 0, 0);
> unlock_spi_csq_mutexes(adev);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 0320be4a5fc6..456c8e189b7a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -1564,7 +1564,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
> mask = 1;
> cu_bitmap = 0;
> counter = 0;
> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
>
> for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
> if (cu_info->bitmap[i][j] & mask) {
> @@ -1583,7 +1583,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
> cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
> }
> }
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
> }
>
> @@ -1605,7 +1605,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
>
> mutex_lock(&adev->grbm_idx_mutex);
> /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
>
> /* set mmRLC_LB_PARAMS = 0x003F_1006 */
> @@ -1654,7 +1654,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
>
> mutex_lock(&adev->grbm_idx_mutex);
> /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
>
> /* set mmRLC_LB_PARAMS = 0x003F_1006 */
> @@ -2324,13 +2324,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
> mutex_lock(&adev->grbm_idx_mutex);
> for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
> for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
> data = gfx_v9_0_get_rb_active_bitmap(adev);
> active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
> rb_bitmap_width_per_sh);
> }
> }
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
>
> adev->gfx.config.backend_enable_mask = active_rbs;
> @@ -2467,14 +2467,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
> mutex_lock(&adev->grbm_idx_mutex);
> for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
> for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
> for (k = 0; k < adev->usec_timeout; k++) {
> if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
> break;
> udelay(1);
> }
> if (k == adev->usec_timeout) {
> - gfx_v9_0_select_se_sh(adev, 0xffffffff,
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff,
> 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
> DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
> @@ -2483,7 +2483,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
> }
> }
> }
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
>
> mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
> @@ -6482,7 +6482,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
> for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
> for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
> for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
> - gfx_v9_0_select_se_sh(adev, j, 0x0, k);
> + amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
> RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
> }
> }
> @@ -6544,7 +6544,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
> for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
> for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
> for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
> - gfx_v9_0_select_se_sh(adev, j, 0, k);
> + amdgpu_gfx_select_se_sh(adev, j, 0, k);
> reg_value =
> RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
> if (reg_value)
> @@ -6559,7 +6559,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
> err_data->ce_count += sec_count;
> err_data->ue_count += ded_count;
>
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
>
> gfx_v9_0_query_utc_edc_status(adev, err_data);
> @@ -6963,7 +6963,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
> mask = 1;
> ao_bitmap = 0;
> counter = 0;
> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
> gfx_v9_0_set_user_cu_inactive_bitmap(
> adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
> bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
> @@ -6996,7 +6996,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
> cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
> }
> }
> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
> mutex_unlock(&adev->grbm_idx_mutex);
>
> cu_info->number = active_cu_number;
> --
> 2.37.3
>
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/amdgpu: switch to select_se_sh wrapper for gfx v9_0
2022-10-27 18:01 ` Alex Deucher
@ 2022-10-27 18:56 ` Felix Kuehling
0 siblings, 0 replies; 3+ messages in thread
From: Felix Kuehling @ 2022-10-27 18:56 UTC (permalink / raw)
To: Alex Deucher, Alex Deucher; +Cc: Le Ma, amd-gfx, Hawking Zhang
Am 2022-10-27 um 14:01 schrieb Alex Deucher:
> Ping?
The patch already has a R-b from Ma Le. Anyway, the patch is
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
>
> On Tue, Oct 11, 2022 at 5:28 PM Alex Deucher <alexander.deucher@amd.com> wrote:
>> From: Hawking Zhang <Hawking.Zhang@amd.com>
>>
>> To allow invoking ip specific callbacks
>>
>> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
>> Reviewed-by: Le Ma <le.ma@amd.com>
>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>> ---
>> .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +--
>> drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 28 +++++++++----------
>> 2 files changed, 16 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
>> index 81e3b528bbc9..e92b93557c13 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
>> @@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
>> for (se_idx = 0; se_idx < se_cnt; se_idx++) {
>> for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
>>
>> - gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
>> queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
>>
>> /*
>> @@ -820,7 +820,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
>> }
>> }
>>
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> soc15_grbm_select(adev, 0, 0, 0, 0);
>> unlock_spi_csq_mutexes(adev);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> index 0320be4a5fc6..456c8e189b7a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> @@ -1564,7 +1564,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
>> mask = 1;
>> cu_bitmap = 0;
>> counter = 0;
>> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
>>
>> for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
>> if (cu_info->bitmap[i][j] & mask) {
>> @@ -1583,7 +1583,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
>> cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
>> }
>> }
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>> }
>>
>> @@ -1605,7 +1605,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
>>
>> mutex_lock(&adev->grbm_idx_mutex);
>> /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
>>
>> /* set mmRLC_LB_PARAMS = 0x003F_1006 */
>> @@ -1654,7 +1654,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
>>
>> mutex_lock(&adev->grbm_idx_mutex);
>> /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
>>
>> /* set mmRLC_LB_PARAMS = 0x003F_1006 */
>> @@ -2324,13 +2324,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
>> mutex_lock(&adev->grbm_idx_mutex);
>> for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
>> for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
>> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
>> data = gfx_v9_0_get_rb_active_bitmap(adev);
>> active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
>> rb_bitmap_width_per_sh);
>> }
>> }
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>>
>> adev->gfx.config.backend_enable_mask = active_rbs;
>> @@ -2467,14 +2467,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
>> mutex_lock(&adev->grbm_idx_mutex);
>> for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
>> for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
>> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
>> for (k = 0; k < adev->usec_timeout; k++) {
>> if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
>> break;
>> udelay(1);
>> }
>> if (k == adev->usec_timeout) {
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff,
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff,
>> 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>> DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
>> @@ -2483,7 +2483,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
>> }
>> }
>> }
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>>
>> mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
>> @@ -6482,7 +6482,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
>> for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
>> for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
>> for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
>> - gfx_v9_0_select_se_sh(adev, j, 0x0, k);
>> + amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
>> RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
>> }
>> }
>> @@ -6544,7 +6544,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
>> for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
>> for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
>> for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
>> - gfx_v9_0_select_se_sh(adev, j, 0, k);
>> + amdgpu_gfx_select_se_sh(adev, j, 0, k);
>> reg_value =
>> RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
>> if (reg_value)
>> @@ -6559,7 +6559,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
>> err_data->ce_count += sec_count;
>> err_data->ue_count += ded_count;
>>
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>>
>> gfx_v9_0_query_utc_edc_status(adev, err_data);
>> @@ -6963,7 +6963,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
>> mask = 1;
>> ao_bitmap = 0;
>> counter = 0;
>> - gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
>> gfx_v9_0_set_user_cu_inactive_bitmap(
>> adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
>> bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
>> @@ -6996,7 +6996,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
>> cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
>> }
>> }
>> - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
>> mutex_unlock(&adev->grbm_idx_mutex);
>>
>> cu_info->number = active_cu_number;
>> --
>> 2.37.3
>>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2022-10-27 18:56 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-10-11 21:28 [PATCH] drm/amdgpu: switch to select_se_sh wrapper for gfx v9_0 Alex Deucher
2022-10-27 18:01 ` Alex Deucher
2022-10-27 18:56 ` Felix Kuehling
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.