* [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
@ 2022-01-17 5:41 Evan Quan
2022-01-17 5:41 ` [PATCH V2 2/7] drm/amd/pm: drop unneeded vcn/jpeg_gate_lock Evan Quan
` (8 more replies)
0 siblings, 9 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
All those APIs are already protected either by adev->pm.mutex
or smu->message_lock, so the smu->mutex lock protection is unneeded.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++----------------
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
.../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
.../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
.../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
.../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
6 files changed, 25 insertions(+), 299 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 828cb932f6a9..411f03eb4523 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed);
+ enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
@@ -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
{
struct smu_context *smu = handle;
- int size = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu_get_pp_feature_mask(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu_get_pp_feature_mask(smu, buf);
}
static int smu_sys_set_pp_feature_mask(void *handle,
uint64_t new_mask)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_pp_feature_mask(smu, new_mask);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_pp_feature_mask(smu, new_mask);
}
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
@@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
if (!min && !max)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_ultimate_freq)
ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle,
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t powerplay_table_size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- powerplay_table_size = smu_table->power_play_table_size;
-
- mutex_unlock(&smu->mutex);
-
- return powerplay_table_size;
+ return smu_table->power_play_table_size;
}
static int smu_sys_set_pp_table(void *handle,
@@ -521,13 +491,10 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- mutex_lock(&smu->mutex);
if (!smu_table->hardcode_pptable)
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
- if (!smu_table->hardcode_pptable) {
- ret = -ENOMEM;
- goto failed;
- }
+ if (!smu_table->hardcode_pptable)
+ return -ENOMEM;
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
@@ -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
smu->uploading_custom_pp_table = false;
-failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- mutex_init(&smu->mutex);
mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
@@ -736,8 +700,7 @@ static int smu_late_init(void *handle)
smu_handle_task(smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT,
- false);
+ AMD_PP_TASK_COMPLETE_INIT);
smu_restore_dpm_user_profile(smu);
@@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct work_struct *work)
struct smu_context *smu = container_of(work, struct smu_context,
interrupt_work);
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
smu->ppt_funcs->interrupt_work(smu);
-
- mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
@@ -1632,8 +1591,6 @@ static int smu_display_configuration_change(void *handle,
if (!display_config)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
smu_set_min_dcef_deep_sleep(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
@@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void *handle,
num_of_active_display++;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1766,22 +1721,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed)
+ enum amd_pp_task task_id)
{
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- if (lock_needed)
- mutex_lock(&smu->mutex);
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- goto out;
+ return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context *smu,
break;
}
-out:
- if (lock_needed)
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+ return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
@@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (!en) {
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
@@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void *handle,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, &workload, 0);
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- enum amd_dpm_forced_level level;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&(smu->mutex));
- level = smu_dpm_ctx->dpm_level;
- mutex_unlock(&(smu->mutex));
-
- return level;
+ return smu_dpm_ctx->dpm_level;
}
static int smu_force_performance_level(void *handle,
@@ -1880,19 +1818,12 @@ static int smu_force_performance_level(void *handle,
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu_enable_umd_pstate(smu, &level);
- if (ret) {
- mutex_unlock(&smu->mutex);
+ if (ret)
return ret;
- }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE,
- false);
-
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_READJUST_POWER_STATE);
/* reset user dpm clock state */
if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1906,16 +1837,11 @@ static int smu_force_performance_level(void *handle,
static int smu_set_display_count(void *handle, uint32_t count)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
- ret = smu_init_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_init_display_count(smu, count);
}
static int smu_force_smuclk_levels(struct smu_context *smu,
@@ -1933,8 +1859,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
return -EINVAL;
}
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
@@ -1943,8 +1867,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs &&
smu->ppt_funcs->set_mp1_state)
ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_df_cstate(smu, state);
if (ret)
dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
if (ret)
dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
-
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, NULL);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, NULL);
}
static int smu_set_watermarks_for_clock_ranges(void *handle,
struct pp_smu_wm_range_sets *clock_ranges)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2086,13 +1987,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
if (smu->disable_watermark)
return 0;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, clock_ranges);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, clock_ranges);
}
int smu_set_ac_dc(struct smu_context *smu)
@@ -2106,14 +2001,12 @@ int smu_set_ac_dc(struct smu_context *smu)
if (smu->dc_controlled_by_gpio)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_power_source(smu,
smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (ret)
dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
smu->adev->pm.ac_power ? "AC" : "DC");
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_gfx_cgpg)
ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
if (speed == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
@@ -2236,8 +2123,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
smu->user_dpm_profile.fan_speed_pwm = 0;
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
break;
}
- mutex_lock(&smu->mutex);
-
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
@@ -2328,8 +2211,6 @@ int smu_get_power_limit(void *handle,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- goto out;
- }
+ if (smu->ppt_funcs->set_power_limit)
+ return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!limit)
@@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
smu->user_dpm_profile.power_limit = limit;
}
-out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->print_clk_levels)
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->od_edit_dpm_table) {
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
size_val = *size_arg;
size = &size_val;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->read_sensor)
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
goto unlock;
@@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
}
unlock:
- mutex_unlock(&smu->mutex);
-
// assign uint32_t to int
*size_arg = size_val;
@@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle,
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->get_power_profile_mode)
@@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
if (!buf)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
- ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}
static int smu_set_power_profile_mode(void *handle,
@@ -2550,19 +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- smu_bump_power_profile_mode(smu, param, param_size);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_bump_power_profile_mode(smu, param, param_size);
}
@@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
if (!fan_mode)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
if (value == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
if (ret)
goto out;
@@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
}
out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
if (!speed)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
if (speed == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
@@ -2674,8 +2507,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
smu->user_dpm_profile.fan_speed_rpm = 0;
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
if (!speed)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_min_dcef_deep_sleep(smu, clk);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_min_dcef_deep_sleep(smu, clk);
}
static int smu_get_clock_by_type_with_latency(void *handle,
@@ -2730,8 +2550,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_clock_by_type_with_latency) {
switch (type) {
case amd_pp_sys_clock:
@@ -2748,15 +2566,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
break;
default:
dev_err(smu->adev->dev, "Invalid clock type!\n");
- mutex_unlock(&smu->mutex);
return -EINVAL;
}
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2769,13 +2584,9 @@ static int smu_display_clock_voltage_request(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_clock_voltage_request)
ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2789,13 +2600,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_disable_memory_clock_switch)
ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_xgmi_pstate)
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
- mutex_unlock(&smu->mutex);
-
if(ret)
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
@@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
static int smu_get_baco_capability(void *handle, bool *cap)
{
struct smu_context *smu = handle;
- int ret = 0;
*cap = false;
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
*cap = smu->ppt_funcs->baco_is_support(smu);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return 0;
}
static int smu_baco_set_state(void *handle, int state)
@@ -2850,20 +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
return -EOPNOTSUPP;
if (state == 0) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_exit)
ret = smu->ppt_funcs->baco_exit(smu);
-
- mutex_unlock(&smu->mutex);
} else if (state == 1) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_enter)
ret = smu->ppt_funcs->baco_enter(smu);
-
- mutex_unlock(&smu->mutex);
-
} else {
return -EINVAL;
}
@@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
ret = smu->ppt_funcs->mode1_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
ret = smu->ppt_funcs->mode2_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode1_reset)
ret = smu->ppt_funcs->mode1_reset(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode2_reset)
ret = smu->ppt_funcs->mode2_reset(smu);
- mutex_unlock(&smu->mutex);
-
if (ret)
dev_err(smu->adev->dev, "Mode2 reset failed!\n");
@@ -2956,13 +2729,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_uclk_dpm_states)
ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2994,13 +2759,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_current_power_state)
pm_state = smu->ppt_funcs->get_current_power_state(smu);
- mutex_unlock(&smu->mutex);
-
return pm_state;
}
@@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_clock_table)
ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
struct smu_context *smu = handle;
- ssize_t size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
if (!smu->ppt_funcs->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu->ppt_funcs->get_gpu_metrics(smu, table);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
static int smu_enable_mgpu_fan_boost(void *handle)
@@ -3051,13 +2801,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->enable_mgpu_fan_boost)
ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->gfx_state_change_set)
ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->smu_handle_passthrough_sbr)
ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
int ret = -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs &&
smu->ppt_funcs->get_ecc_info)
ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
- mutex_unlock(&smu->mutex);
return ret;
@@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&smu->mutex);
if (memory_pool->bo) {
*addr = memory_pool->cpu_addr;
*size = memory_pool->size;
}
- mutex_unlock(&smu->mutex);
return 0;
}
@@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
{
int ret = -EINVAL;
- if (smu->ppt_funcs->wait_for_event) {
- mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->wait_for_event)
ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
- mutex_unlock(&smu->mutex);
- }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3fdab6a44901..00760f3c6da5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -488,7 +488,6 @@ struct smu_context
const struct cmn2asic_mapping *table_map;
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
- struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
struct mutex message_lock;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index d3963bfe5c89..addb0472d040 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 37e11716e919..fe17b3c1ece7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9766870987db..93caaf45a2db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ac8ba5e0e697..2546f79c8511 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 2/7] drm/amd/pm: drop unneeded vcn/jpeg_gate_lock
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-17 5:41 ` [PATCH V2 3/7] drm/amd/pm: drop unneeded smu->metrics_lock Evan Quan
` (7 subsequent siblings)
8 siblings, 0 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
Those related APIs are already protected by adev->pm.mutex, so the
vcn/jpeg_gate_lock protection is unneeded.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I762fab96bb1c034c153b029f939ec6e498460007
---
v1->v2:
- optimize the label for error exit(Guchun)
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 63 ++++---------------
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 2 -
2 files changed, 11 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 411f03eb4523..1a560d2702db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -158,8 +158,8 @@ static u32 smu_get_sclk(void *handle, bool low)
return clk_freq * 100;
}
-static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -178,24 +178,8 @@ static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->vcn_gate_lock);
-
- ret = smu_dpm_set_vcn_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->vcn_gate_lock);
-
- return ret;
-}
-
-static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -214,22 +198,6 @@ static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->jpeg_gate_lock);
-
- ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->jpeg_gate_lock);
-
- return ret;
-}
-
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -619,32 +587,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- mutex_lock(&power_gate->vcn_gate_lock);
- mutex_lock(&power_gate->jpeg_gate_lock);
-
vcn_gate = atomic_read(&power_gate->vcn_gated);
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
- ret = smu_dpm_set_vcn_enable_locked(smu, true);
+ ret = smu_dpm_set_vcn_enable(smu, true);
if (ret)
- goto err0_out;
+ return ret;
- ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+ ret = smu_dpm_set_jpeg_enable(smu, true);
if (ret)
- goto err1_out;
+ goto err_out;
ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");
- smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
-err1_out:
- smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
-err0_out:
- mutex_unlock(&power_gate->jpeg_gate_lock);
- mutex_unlock(&power_gate->vcn_gate_lock);
-
+ smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+err_out:
+ smu_dpm_set_vcn_enable(smu, !vcn_gate);
return ret;
}
@@ -1006,8 +967,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
- mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
- mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 00760f3c6da5..c3efe4fea5e0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -376,8 +376,6 @@ struct smu_power_gate {
bool vce_gated;
atomic_t vcn_gated;
atomic_t jpeg_gated;
- struct mutex vcn_gate_lock;
- struct mutex jpeg_gate_lock;
};
struct smu_power_context {
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 3/7] drm/amd/pm: drop unneeded smu->metrics_lock
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
2022-01-17 5:41 ` [PATCH V2 2/7] drm/amd/pm: drop unneeded vcn/jpeg_gate_lock Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-17 5:41 ` [PATCH V2 4/7] drm/amd/pm: drop unneeded smu->sensor_lock Evan Quan
` (6 subsequent siblings)
8 siblings, 0 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
As all those related APIs are already well protected by
adev->pm.mutex and smu->message_lock.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Ic75326ba7b4b67be8762d5407d02f6c514e1ad35
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 -
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
.../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 14 +--
.../amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 10 +-
.../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 112 +++++-------------
.../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 27 ++---
.../gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 28 ++---
.../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 14 +--
.../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 23 ++--
.../drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 10 +-
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 21 +---
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 4 -
12 files changed, 70 insertions(+), 195 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 1a560d2702db..3123efe339ae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -955,7 +955,6 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
mutex_init(&smu->sensor_lock);
- mutex_init(&smu->metrics_lock);
mutex_init(&smu->message_lock);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index c3efe4fea5e0..63ed807c96f5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -487,7 +487,6 @@ struct smu_context
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
struct mutex sensor_lock;
- struct mutex metrics_lock;
struct mutex message_lock;
uint64_t pool_size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index addb0472d040..3f7c1f23475b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -602,15 +602,11 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -693,8 +689,6 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 2238ee19c222..7ae6b1bd648a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -150,13 +150,9 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -200,8 +196,6 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index fe17b3c1ece7..fdb059e7c6ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -546,15 +546,11 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -624,8 +620,6 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -638,15 +632,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -719,8 +709,6 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -733,15 +721,11 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -811,8 +795,6 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -825,15 +807,11 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -906,8 +884,6 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -2708,20 +2684,14 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2899,20 +2869,14 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2977,20 +2941,14 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3058,20 +3016,14 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 93caaf45a2db..2241250c2d2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -525,15 +525,11 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -633,8 +629,6 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -3564,14 +3558,11 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- &metrics_external,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics_external,
+ true);
+ if (ret)
return ret;
- }
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
@@ -3661,8 +3652,6 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
smu_v11_0_get_current_pcie_link_speed(smu);
}
- mutex_unlock(&smu->metrics_lock);
-
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5cb07ed227fb..c736adca6fbb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -273,15 +273,11 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -335,8 +331,6 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -348,15 +342,11 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -410,8 +400,6 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 25c4b135f830..d75508085578 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1128,15 +1128,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -1201,8 +1197,6 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 2546f79c8511..1661a958421f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -571,15 +571,11 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -653,8 +649,6 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1592,17 +1586,14 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
uint32_t upper32 = 0, lower32 = 0;
int ret;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
if (ret)
- goto out_unlock;
+ goto out;
upper32 = metrics->PublicSerialNumUpper32;
lower32 = metrics->PublicSerialNumLower32;
-out_unlock:
- mutex_unlock(&smu->metrics_lock);
-
+out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
if (adev->serial[0] == '\0')
sprintf(adev->serial, "%016llx", adev->unique_id);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..451d30dcc639 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -310,13 +310,9 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -387,8 +383,6 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 735e1a1e365d..d78e4f689a2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -964,9 +964,9 @@ int smu_cmn_write_pptable(struct smu_context *smu)
true);
}
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table= &smu->smu_table;
uint32_t table_size =
@@ -994,21 +994,6 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
return 0;
}
-int smu_cmn_get_metrics_table(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
-{
- int ret = 0;
-
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- metrics_table,
- bypass_cache);
- mutex_unlock(&smu->metrics_lock);
-
- return ret;
-}
-
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
struct metrics_table_header *header = (struct metrics_table_header *)table;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 67a25da79256..f0b4fb2a0960 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -101,10 +101,6 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu);
int smu_cmn_write_pptable(struct smu_context *smu);
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache);
-
int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache);
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 4/7] drm/amd/pm: drop unneeded smu->sensor_lock
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
2022-01-17 5:41 ` [PATCH V2 2/7] drm/amd/pm: drop unneeded vcn/jpeg_gate_lock Evan Quan
2022-01-17 5:41 ` [PATCH V2 3/7] drm/amd/pm: drop unneeded smu->metrics_lock Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-17 5:41 ` [PATCH V2 5/7] drm/amd/pm: drop unneeded smu_baco->mutex Evan Quan
` (5 subsequent siblings)
8 siblings, 0 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
As all those related APIs are already well protected by
adev->pm.mutex and smu->message_lock.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I20974b2ae68d63525bc7c7f406fede2971c5fecc
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 -
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 4 ----
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 --
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 2 --
10 files changed, 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 3123efe339ae..ccfbbb6c0b28 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -954,7 +954,6 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
- mutex_init(&smu->sensor_lock);
mutex_init(&smu->message_lock);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 63ed807c96f5..2cef7ff46010 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -486,7 +486,6 @@ struct smu_context
const struct cmn2asic_mapping *table_map;
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
- struct mutex sensor_lock;
struct mutex message_lock;
uint64_t pool_size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 3f7c1f23475b..ad529e0f45a5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1113,7 +1113,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1174,7 +1173,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 7ae6b1bd648a..2acd7470431e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -209,8 +209,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
-
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
@@ -261,8 +259,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->sensor_lock);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index fdb059e7c6ba..21354cb4ddec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1964,7 +1964,6 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -2024,7 +2023,6 @@ static int navi10_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 2241250c2d2a..a938511362a8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1605,7 +1605,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
GET_PPTABLE_MEMBER(FanMaximumRpm, &temp);
@@ -1666,7 +1665,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index c736adca6fbb..721027917f81 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1494,7 +1494,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = vangogh_common_get_smu_metrics_data(smu,
@@ -1556,7 +1555,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index d75508085578..e99e7b2bf25b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1209,7 +1209,6 @@ static int renoir_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = renoir_get_smu_metrics_data(smu,
@@ -1277,7 +1276,6 @@ static int renoir_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 1661a958421f..b71f14af009d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1141,7 +1141,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
@@ -1180,7 +1179,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 451d30dcc639..bd24a2632214 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -395,7 +395,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = yellow_carp_get_smu_metrics_data(smu,
@@ -463,7 +462,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 5/7] drm/amd/pm: drop unneeded smu_baco->mutex
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (2 preceding siblings ...)
2022-01-17 5:41 ` [PATCH V2 4/7] drm/amd/pm: drop unneeded smu->sensor_lock Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-17 5:41 ` [PATCH V2 6/7] drm/amd/pm: drop unneeded feature->mutex Evan Quan
` (4 subsequent siblings)
8 siblings, 0 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
As those related APIs are already well protected by adev->pm.mutex.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I8a7d8da5710698a98dd0f7e70c244be57474b573
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 -
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
.../gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 19 +++++--------------
3 files changed, 5 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ccfbbb6c0b28..7cc3886ddee4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -566,7 +566,6 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2cef7ff46010..79b2a817491c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -428,7 +428,6 @@ enum smu_baco_state
struct smu_baco_context
{
- struct mutex mutex;
uint32_t state;
bool platform_support;
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 9acf2c045a97..0d85dc2f9cd4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1615,13 +1615,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- enum smu_baco_state baco_state;
- mutex_lock(&smu_baco->mutex);
- baco_state = smu_baco->state;
- mutex_unlock(&smu_baco->mutex);
-
- return baco_state;
+ return smu_baco->state;
}
#define D3HOT_BACO_SEQUENCE 0
@@ -1638,8 +1633,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (smu_v11_0_baco_get_state(smu) == state)
return 0;
- mutex_lock(&smu_baco->mutex);
-
if (state == SMU_BACO_STATE_ENTER) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(11, 0, 7):
@@ -1680,18 +1673,16 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
} else {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
- goto out;
+ return ret;
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
}
- if (ret)
- goto out;
- smu_baco->state = state;
-out:
- mutex_unlock(&smu_baco->mutex);
+ if (!ret)
+ smu_baco->state = state;
+
return ret;
}
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 6/7] drm/amd/pm: drop unneeded feature->mutex
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (3 preceding siblings ...)
2022-01-17 5:41 ` [PATCH V2 5/7] drm/amd/pm: drop unneeded smu_baco->mutex Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-17 5:41 ` [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock Evan Quan
` (3 subsequent siblings)
8 siblings, 0 replies; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
As all those related APIs are already well protected by adev->pm.mutex.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Ia2c752ff22e8f23601484f48b66151cfda8c01b5
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 -
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
.../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 20 ++++++++-----------
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 19 ++----------------
4 files changed, 10 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7cc3886ddee4..198c2ac7b04d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -948,7 +948,6 @@ static int smu_sw_init(void *handle)
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
- mutex_init(&smu->smu_feature.mutex);
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 79b2a817491c..18f24db7d202 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -391,7 +391,6 @@ struct smu_feature
DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
- struct mutex mutex;
};
struct smu_clocks {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f66d8b9135ca..9b439066a08b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -722,25 +722,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
- mutex_lock(&feature->mutex);
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
- goto failed;
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
- goto failed;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0], NULL);
- if (ret)
- goto failed;
+ return ret;
-failed:
- mutex_unlock(&feature->mutex);
- return ret;
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
}
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index d78e4f689a2a..fcead7c6ca7e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -481,7 +481,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- int ret = 0;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
@@ -491,11 +490,7 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->supported);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, feature->supported);
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
@@ -504,7 +499,6 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
int feature_id;
- int ret = 0;
if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
return 1;
@@ -517,11 +511,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->enabled);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, feature->enabled);
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
@@ -666,14 +656,12 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
return ret;
}
- mutex_lock(&feature->mutex);
if (enabled)
bitmap_or(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
else
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
return ret;
}
@@ -843,11 +831,8 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
}
if (no_hw_disablement) {
- mutex_lock(&feature->mutex);
bitmap_andnot(feature->enabled, feature->enabled,
(unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
return 0;
} else {
return smu_cmn_feature_update_enable_state(smu,
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (4 preceding siblings ...)
2022-01-17 5:41 ` [PATCH V2 6/7] drm/amd/pm: drop unneeded feature->mutex Evan Quan
@ 2022-01-17 5:41 ` Evan Quan
2022-01-20 11:51 ` Quan, Evan
2022-01-20 13:37 ` [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Chen, Guchun
` (2 subsequent siblings)
8 siblings, 1 reply; 14+ messages in thread
From: Evan Quan @ 2022-01-17 5:41 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, Guchun.Chen
As all those related APIs are already well protected by adev->pm.mutex.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I36426791d3bbc9d84a6ae437da26a892682eb0cb
---
.../gpu/drm/amd/pm/powerplay/amd_powerplay.c | 278 +++---------------
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 1 -
2 files changed, 38 insertions(+), 241 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 76c26ae368f9..a2da46bf3985 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -50,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
- mutex_init(&hwmgr->smu_lock);
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -178,12 +177,9 @@ static int pp_late_init(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- if (hwmgr && hwmgr->pm_en) {
- mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr && hwmgr->pm_en)
hwmgr_handle_task(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
- mutex_unlock(&hwmgr->smu_lock);
- }
if (adev->pm.smu_prv_buffer_size != 0)
pp_reserve_vram_for_smu(adev);
@@ -345,11 +341,9 @@ static int pp_dpm_force_performance_level(void *handle,
if (level == hwmgr->dpm_level)
return 0;
- mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -358,21 +352,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- enum amd_dpm_forced_level level;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- level = hwmgr->dpm_level;
- mutex_unlock(&hwmgr->smu_lock);
- return level;
+ return hwmgr->dpm_level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -381,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -399,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
@@ -416,9 +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
@@ -432,25 +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
- int ret = 0;
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr_handle_task(hwmgr, task_id, user_state);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr_handle_task(hwmgr, task_id, user_state);
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
@@ -462,8 +435,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
state = hwmgr->current_ps;
switch (state->classification.ui_label) {
@@ -483,7 +454,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&hwmgr->smu_lock);
return pm_type;
}
@@ -501,9 +471,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
if (mode == U32_MAX)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -521,16 +489,13 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
if (!fan_mode)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
@@ -541,16 +506,12 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
if (speed == U32_MAX)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
@@ -561,16 +522,12 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
if (!speed)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
@@ -581,16 +538,12 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
if (!rpm)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
@@ -601,10 +554,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
if (rpm == U32_MAX)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_get_pp_num_states(void *handle,
@@ -618,8 +568,6 @@ static int pp_dpm_get_pp_num_states(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
data->nums = hwmgr->num_ps;
for (i = 0; i < hwmgr->num_ps; i++) {
@@ -642,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void *handle,
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- int size = 0;
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
- size = hwmgr->soft_pp_table_size;
- mutex_unlock(&hwmgr->smu_lock);
- return size;
+ return hwmgr->soft_pp_table_size;
}
static int amd_powerplay_reset(void *handle)
@@ -685,13 +628,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
- goto err;
+ return ret;
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
@@ -700,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
ret = amd_powerplay_reset(handle);
if (ret)
- goto err;
+ return ret;
- if (hwmgr->hwmgr_func->avfs_control) {
+ if (hwmgr->hwmgr_func->avfs_control)
ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
- if (ret)
- goto err;
- }
- mutex_unlock(&hwmgr->smu_lock);
- return 0;
-err:
- mutex_unlock(&hwmgr->smu_lock);
+
return ret;
}
@@ -718,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -733,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -752,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -770,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -789,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -807,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -825,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !value)
return -EINVAL;
@@ -854,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
return 0;
default:
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
}
}
@@ -877,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
return -EOPNOTSUPP;
if (!buf)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = -EOPNOTSUPP;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
- return ret;
+ return -EOPNOTSUPP;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("power profile setting is for manual dpm mode only.\n");
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
@@ -971,8 +871,6 @@ static int pp_dpm_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
if (!en) {
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
@@ -987,15 +885,12 @@ static int pp_dpm_switch_power_profile(void *handle,
if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
- if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
- mutex_unlock(&hwmgr->smu_lock);
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
return -EINVAL;
- }
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1025,10 +920,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
if (limit > max_power_limit)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1045,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
if (power_type != PP_PWR_TYPE_SUSTAINED)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
-
switch (pp_limit_level) {
case PP_PWR_LIMIT_CURRENT:
*limit = hwmgr->power_limit;
@@ -1066,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
break;
}
- mutex_unlock(&hwmgr->smu_lock);
-
return ret;
}
@@ -1079,9 +968,7 @@ static int pp_display_configuration_change(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1089,15 +976,11 @@ static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!output)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_dal_power_level(hwmgr, output);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_dal_power_level(hwmgr, output);
}
static int pp_get_current_clocks(void *handle,
@@ -1111,8 +994,6 @@ static int pp_get_current_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -1125,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
if (ret) {
pr_debug("Error in phm_get_clock_info \n");
- mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1148,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1163,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_latency(void *handle,
@@ -1174,15 +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
struct pp_clock_levels_with_latency *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_voltage(void *handle,
@@ -1190,50 +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
struct pp_clock_levels_with_voltage *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
- ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
-
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- clock_ranges);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_watermarks_for_clocks_ranges(hwmgr,
+ clock_ranges);
}
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clock)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_display_clock_voltage_request(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_display_clock_voltage_request(hwmgr, clock);
}
static int pp_get_display_mode_validation_clocks(void *handle,
@@ -1247,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
clocks->level = PP_DAL_POWERLEVEL_7;
- mutex_lock(&hwmgr->smu_lock);
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
- mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1364,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1382,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1401,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1420,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1439,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1449,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
static int pp_set_active_display_count(void *handle, uint32_t count)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_active_display_count(hwmgr, count);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_active_display_count(hwmgr, count);
}
static int pp_get_asic_baco_capability(void *handle, bool *cap)
@@ -1473,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1490,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1508,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1518,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int state)
static int pp_get_ppfeature_status(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
@@ -1528,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1548,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1568,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1588,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
@@ -1605,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1622,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1632,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
struct pp_hwmgr *hwmgr = handle;
- ssize_t size;
if (!hwmgr)
return -EINVAL;
@@ -1640,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
- size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
- mutex_unlock(&hwmgr->smu_lock);
-
- return size;
+ return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}
static int pp_gfx_state_change_set(void *handle, uint32_t state)
@@ -1659,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1675,12 +1475,10 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&hwmgr->smu_lock);
if (adev->pm.smu_prv_buffer) {
amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
*size = adev->pm.smu_prv_buffer_size;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 03226baea65e..4f7f2f455301 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -748,7 +748,6 @@ struct pp_hwmgr {
bool not_vf;
bool pm_en;
bool pp_one_vf;
- struct mutex smu_lock;
struct mutex msg_lock;
uint32_t pp_table_version;
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* RE: [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
2022-01-17 5:41 ` [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock Evan Quan
@ 2022-01-20 11:51 ` Quan, Evan
2022-01-20 13:41 ` Chen, Guchun
0 siblings, 1 reply; 14+ messages in thread
From: Quan, Evan @ 2022-01-20 11:51 UTC (permalink / raw)
To: amd-gfx; +Cc: Deucher, Alexander, Lazar, Lijo, Chen, Guchun
[AMD Official Use Only]
Ping for the series...
> -----Original Message-----
> From: Quan, Evan <Evan.Quan@amd.com>
> Sent: Monday, January 17, 2022 1:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>; Chen, Guchun <Guchun.Chen@amd.com>; Quan,
> Evan <Evan.Quan@amd.com>
> Subject: [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
>
> All those related APIs are already well protected by adev->pm.mutex.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I36426791d3bbc9d84a6ae437da26a892682eb0cb
> ---
> .../gpu/drm/amd/pm/powerplay/amd_powerplay.c | 278 +++---------------
> drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 1 -
> 2 files changed, 38 insertions(+), 241 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index 76c26ae368f9..a2da46bf3985 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -50,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device
> *adev)
> hwmgr->adev = adev;
> hwmgr->not_vf = !amdgpu_sriov_vf(adev);
> hwmgr->device = amdgpu_cgs_create_device(adev);
> - mutex_init(&hwmgr->smu_lock);
> mutex_init(&hwmgr->msg_lock);
> hwmgr->chip_family = adev->family;
> hwmgr->chip_id = adev->asic_type;
> @@ -178,12 +177,9 @@ static int pp_late_init(void *handle)
> struct amdgpu_device *adev = handle;
> struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
>
> - if (hwmgr && hwmgr->pm_en) {
> - mutex_lock(&hwmgr->smu_lock);
> + if (hwmgr && hwmgr->pm_en)
> hwmgr_handle_task(hwmgr,
> AMD_PP_TASK_COMPLETE_INIT,
> NULL);
> - mutex_unlock(&hwmgr->smu_lock);
> - }
> if (adev->pm.smu_prv_buffer_size != 0)
> pp_reserve_vram_for_smu(adev);
>
> @@ -345,11 +341,9 @@ static int pp_dpm_force_performance_level(void
> *handle,
> if (level == hwmgr->dpm_level)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> pp_dpm_en_umd_pstate(hwmgr, &level);
> hwmgr->request_dpm_level = level;
> hwmgr_handle_task(hwmgr,
> AMD_PP_TASK_READJUST_POWER_STATE, NULL);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -358,21 +352,16 @@ static enum amd_dpm_forced_level
> pp_dpm_get_performance_level(
> void *handle)
> {
> struct pp_hwmgr *hwmgr = handle;
> - enum amd_dpm_forced_level level;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - level = hwmgr->dpm_level;
> - mutex_unlock(&hwmgr->smu_lock);
> - return level;
> + return hwmgr->dpm_level;
> }
>
> static uint32_t pp_dpm_get_sclk(void *handle, bool low) {
> struct pp_hwmgr *hwmgr = handle;
> - uint32_t clk = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return 0;
> @@ -381,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle,
> bool low)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
> - mutex_unlock(&hwmgr->smu_lock);
> - return clk;
> + return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
> }
>
> static uint32_t pp_dpm_get_mclk(void *handle, bool low) {
> struct pp_hwmgr *hwmgr = handle;
> - uint32_t clk = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return 0;
> @@ -399,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle,
> bool low)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
> - mutex_unlock(&hwmgr->smu_lock);
> - return clk;
> + return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
> }
>
> static void pp_dpm_powergate_vce(void *handle, bool gate) @@ -416,9
> +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return;
> }
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
> - mutex_unlock(&hwmgr->smu_lock);
> }
>
> static void pp_dpm_powergate_uvd(void *handle, bool gate) @@ -432,25
> +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return;
> }
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
> - mutex_unlock(&hwmgr->smu_lock);
> }
>
> static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
> enum amd_pm_state_type *user_state)
> {
> - int ret = 0;
> struct pp_hwmgr *hwmgr = handle;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr_handle_task(hwmgr, task_id, user_state);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr_handle_task(hwmgr, task_id, user_state);
> }
>
> static enum amd_pm_state_type pp_dpm_get_current_power_state(void
> *handle) @@ -462,8 +435,6 @@ static enum amd_pm_state_type
> pp_dpm_get_current_power_state(void *handle)
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> state = hwmgr->current_ps;
>
> switch (state->classification.ui_label) { @@ -483,7 +454,6 @@ static
> enum amd_pm_state_type pp_dpm_get_current_power_state(void
> *handle)
> pm_type = POWER_STATE_TYPE_DEFAULT;
> break;
> }
> - mutex_unlock(&hwmgr->smu_lock);
>
> return pm_type;
> }
> @@ -501,9 +471,7 @@ static int pp_dpm_set_fan_control_mode(void
> *handle, uint32_t mode)
> if (mode == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -521,16 +489,13 @@ static int pp_dpm_get_fan_control_mode(void
> *handle, uint32_t *fan_mode)
> if (!fan_mode)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> *fan_mode = hwmgr->hwmgr_func-
> >get_fan_control_mode(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -541,16 +506,12 @@ static int pp_dpm_set_fan_speed_pwm(void
> *handle, uint32_t speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
> }
>
> static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -561,16 +522,12 @@ static int pp_dpm_get_fan_speed_pwm(void
> *handle, uint32_t *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
> }
>
> static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -581,16 +538,12 @@ static int pp_dpm_get_fan_speed_rpm(void
> *handle, uint32_t *rpm)
> if (!rpm)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
> }
>
> static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -601,10 +554,7 @@ static int pp_dpm_set_fan_speed_rpm(void
> *handle, uint32_t rpm)
> if (rpm == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
> }
>
> static int pp_dpm_get_pp_num_states(void *handle, @@ -618,8 +568,6
> @@ static int pp_dpm_get_pp_num_states(void *handle,
> if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> data->nums = hwmgr->num_ps;
>
> for (i = 0; i < hwmgr->num_ps; i++) {
> @@ -642,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void
> *handle,
> data->states[i] =
> POWER_STATE_TYPE_DEFAULT;
> }
> }
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_dpm_get_pp_table(void *handle, char **table) {
> struct pp_hwmgr *hwmgr = handle;
> - int size = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> *table = (char *)hwmgr->soft_pp_table;
> - size = hwmgr->soft_pp_table_size;
> - mutex_unlock(&hwmgr->smu_lock);
> - return size;
> + return hwmgr->soft_pp_table_size;
> }
>
> static int amd_powerplay_reset(void *handle) @@ -685,13 +628,12 @@
> static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> if (!hwmgr->hardcode_pp_table) {
> hwmgr->hardcode_pp_table = kmemdup(hwmgr-
> >soft_pp_table,
> hwmgr-
> >soft_pp_table_size,
> GFP_KERNEL);
> if (!hwmgr->hardcode_pp_table)
> - goto err;
> + return ret;
> }
>
> memcpy(hwmgr->hardcode_pp_table, buf, size); @@ -700,17
> +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf,
> size_t size)
>
> ret = amd_powerplay_reset(handle);
> if (ret)
> - goto err;
> + return ret;
>
> - if (hwmgr->hwmgr_func->avfs_control) {
> + if (hwmgr->hwmgr_func->avfs_control)
> ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
> - if (ret)
> - goto err;
> - }
> - mutex_unlock(&hwmgr->smu_lock);
> - return 0;
> -err:
> - mutex_unlock(&hwmgr->smu_lock);
> +
> return ret;
> }
>
> @@ -718,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
> enum pp_clock_type type, uint32_t mask) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -733,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->force_clock_level(hwmgr, type,
> mask);
> }
>
> static int pp_dpm_print_clock_levels(void *handle,
> enum pp_clock_type type, char *buf)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -752,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
> }
>
> static int pp_dpm_get_sclk_od(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -770,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
> }
>
> static int pp_dpm_set_sclk_od(void *handle, uint32_t value) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -789,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle,
> uint32_t value)
> return 0;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
> }
>
> static int pp_dpm_get_mclk_od(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -807,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
> }
>
> static int pp_dpm_set_mclk_od(void *handle, uint32_t value) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -825,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle,
> uint32_t value)
> pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
> }
>
> static int pp_dpm_read_sensor(void *handle, int idx,
> void *value, int *size)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !value)
> return -EINVAL;
> @@ -854,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
> *((uint32_t *)value) = hwmgr-
> >thermal_controller.fanInfo.ulMaxRPM;
> return 0;
> default:
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value,
> size);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->read_sensor(hwmgr, idx,
> value, size);
> }
> }
>
> @@ -877,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle,
> unsigned idx) static int pp_get_power_profile_mode(void *handle, char
> *buf) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret;
>
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func-
> >get_power_profile_mode)
> return -EOPNOTSUPP;
> if (!buf)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr,
> buf);
> }
>
> static int pp_set_power_profile_mode(void *handle, long *input, uint32_t
> size) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = -EOPNOTSUPP;
>
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func-
> >set_power_profile_mode)
> - return ret;
> + return -EOPNOTSUPP;
>
> if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> pr_debug("power profile setting is for manual dpm mode
> only.\n");
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> input, size);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> input, size);
> }
>
> static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input,
> uint32_t size) @@ -971,8 +871,6 @@ static int
> pp_dpm_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> if (!en) {
> hwmgr->workload_mask &= ~(1 << hwmgr-
> >workload_prority[type]);
> index = fls(hwmgr->workload_mask);
> @@ -987,15 +885,12 @@ static int pp_dpm_switch_power_profile(void
> *handle,
>
> if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
> hwmgr->hwmgr_func-
> >disable_power_features_for_compute_performance) {
> - if (hwmgr->hwmgr_func-
> >disable_power_features_for_compute_performance(hwmgr, en)) {
> - mutex_unlock(&hwmgr->smu_lock);
> + if
> +(hwmgr->hwmgr_func-
> >disable_power_features_for_compute_performance(hwmg
> +r, en))
> return -EINVAL;
> - }
> }
>
> if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> &workload, 0);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1025,10 +920,8 @@ static int pp_set_power_limit(void *handle,
> uint32_t limit)
> if (limit > max_power_limit)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
> hwmgr->power_limit = limit;
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1045,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t
> *limit,
> if (power_type != PP_PWR_TYPE_SUSTAINED)
> return -EOPNOTSUPP;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> switch (pp_limit_level) {
> case PP_PWR_LIMIT_CURRENT:
> *limit = hwmgr->power_limit;
> @@ -1066,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t
> *limit,
> break;
> }
>
> - mutex_unlock(&hwmgr->smu_lock);
> -
> return ret;
> }
>
> @@ -1079,9 +968,7 @@ static int pp_display_configuration_change(void
> *handle,
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> phm_store_dal_configuration_data(hwmgr, display_config);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1089,15 +976,11 @@ static int pp_get_display_power_level(void
> *handle,
> struct amd_pp_simple_clock_info *output) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!output)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_dal_power_level(hwmgr, output);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_dal_power_level(hwmgr, output);
> }
>
> static int pp_get_current_clocks(void *handle, @@ -1111,8 +994,6 @@ static
> int pp_get_current_clocks(void *handle,
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> phm_get_dal_power_level(hwmgr, &simple_clocks);
>
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
> @@ -1125,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
>
> if (ret) {
> pr_debug("Error in phm_get_clock_info \n");
> - mutex_unlock(&hwmgr->smu_lock);
> return -EINVAL;
> }
>
> @@ -1148,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
> clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
> clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
> }
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type
> type, struct amd_pp_clocks *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1163,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle,
> enum amd_pp_clock_type type, struc
> if (clocks == NULL)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_clock_by_type(hwmgr, type, clocks);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type(hwmgr, type, clocks);
> }
>
> static int pp_get_clock_by_type_with_latency(void *handle, @@ -1174,15
> +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
> struct pp_clock_levels_with_latency *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clocks)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> }
>
> static int pp_get_clock_by_type_with_voltage(void *handle, @@ -1190,50
> +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
> struct pp_clock_levels_with_voltage *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clocks)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> - ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> -
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> }
>
> static int pp_set_watermarks_for_clocks_ranges(void *handle,
> void *clock_ranges)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
> - clock_ranges);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_set_watermarks_for_clocks_ranges(hwmgr,
> + clock_ranges);
> }
>
> static int pp_display_clock_voltage_request(void *handle,
> struct pp_display_clock_request *clock) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clock)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_display_clock_voltage_request(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_display_clock_voltage_request(hwmgr, clock);
> }
>
> static int pp_get_display_mode_validation_clocks(void *handle, @@ -
> 1247,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void
> *handle,
>
> clocks->level = PP_DAL_POWERLEVEL_7;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
> PHM_PlatformCaps_DynamicPatchPowerState))
> ret = phm_get_max_high_clocks(hwmgr, clocks);
>
> - mutex_unlock(&hwmgr->smu_lock);
> return ret;
> }
>
> @@ -1364,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void
> *handle)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1382,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void
> *handle)
> hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1401,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1420,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr,
> clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1439,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1449,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock) static int pp_set_active_display_count(void *handle,
> uint32_t count) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_set_active_display_count(hwmgr, count);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_set_active_display_count(hwmgr, count);
> }
>
> static int pp_get_asic_baco_capability(void *handle, bool *cap) @@ -1473,9
> +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
> !hwmgr->hwmgr_func->get_asic_baco_capability)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1490,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int
> *state)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum
> BACO_STATE *)state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1508,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int
> state)
> !hwmgr->hwmgr_func->set_asic_baco_state)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum
> BACO_STATE)state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1518,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int
> state) static int pp_get_ppfeature_status(void *handle, char *buf) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !buf)
> return -EINVAL;
> @@ -1528,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle,
> char *buf)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
> }
>
> static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1548,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle,
> uint64_t ppfeature_masks)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr,
> ppfeature_masks);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr,
> +ppfeature_masks);
> }
>
> static int pp_asic_reset_mode_2(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1568,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->asic_reset(hwmgr,
> SMU_ASIC_RESET_MODE_2);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->asic_reset(hwmgr,
> SMU_ASIC_RESET_MODE_2);
> }
>
> static int pp_smu_i2c_bus_access(void *handle, bool acquire) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1588,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle,
> bool acquire)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
> }
>
> static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) @@ -
> 1605,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum
> pp_df_cstate state)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1622,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1632,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate) static ssize_t pp_get_gpu_metrics(void *handle, void
> **table) {
> struct pp_hwmgr *hwmgr = handle;
> - ssize_t size;
>
> if (!hwmgr)
> return -EINVAL;
> @@ -1640,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle,
> void **table)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
> return -EOPNOTSUPP;
>
> - mutex_lock(&hwmgr->smu_lock);
> - size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return size;
> + return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> }
>
> static int pp_gfx_state_change_set(void *handle, uint32_t state) @@ -
> 1659,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle,
> uint32_t state)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1675,12 +1475,10 @@ static int pp_get_prv_buffer_details(void
> *handle, void **addr, size_t *size)
>
> *addr = NULL;
> *size = 0;
> - mutex_lock(&hwmgr->smu_lock);
> if (adev->pm.smu_prv_buffer) {
> amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
> *size = adev->pm.smu_prv_buffer_size;
> }
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> index 03226baea65e..4f7f2f455301 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> @@ -748,7 +748,6 @@ struct pp_hwmgr {
> bool not_vf;
> bool pm_en;
> bool pp_one_vf;
> - struct mutex smu_lock;
> struct mutex msg_lock;
>
> uint32_t pp_table_version;
> --
> 2.29.0
^ permalink raw reply [flat|nested] 14+ messages in thread
* RE: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (5 preceding siblings ...)
2022-01-17 5:41 ` [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock Evan Quan
@ 2022-01-20 13:37 ` Chen, Guchun
2022-01-21 7:09 ` Quan, Evan
2022-01-20 15:23 ` Lazar, Lijo
2022-01-20 15:59 ` Lazar, Lijo
8 siblings, 1 reply; 14+ messages in thread
From: Chen, Guchun @ 2022-01-20 13:37 UTC (permalink / raw)
To: Quan, Evan, amd-gfx; +Cc: Deucher, Alexander, Lazar, Lijo
[Public]
if (!smu_table->hardcode_pptable)
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
- if (!smu_table->hardcode_pptable) {
- ret = -ENOMEM;
- goto failed;
- }
+ if (!smu_table->hardcode_pptable)
+ return -ENOMEM;
I guess it's better to move the second check of hardcode_pptable into the first if-condition block, like:
if (!smu_table->hardcode_pptable) {
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;
}
Regards,
Guchun
-----Original Message-----
From: Quan, Evan <Evan.Quan@amd.com>
Sent: Monday, January 17, 2022 1:42 PM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo <Lijo.Lazar@amd.com>; Chen, Guchun <Guchun.Chen@amd.com>; Quan, Evan <Evan.Quan@amd.com>
Subject: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
All those APIs are already protected either by adev->pm.mutex or by smu->message_lock.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
---
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++----------------
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
.../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
.../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
.../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
.../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
6 files changed, 25 insertions(+), 299 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 828cb932f6a9..411f03eb4523 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed);
+ enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); @@ -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
{
struct smu_context *smu = handle;
- int size = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu_get_pp_feature_mask(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu_get_pp_feature_mask(smu, buf);
}
static int smu_sys_set_pp_feature_mask(void *handle,
uint64_t new_mask)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_pp_feature_mask(smu, new_mask);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_pp_feature_mask(smu, new_mask);
}
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
@@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
if (!min && !max)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_ultimate_freq)
ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle,
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t powerplay_table_size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- powerplay_table_size = smu_table->power_play_table_size;
-
- mutex_unlock(&smu->mutex);
-
- return powerplay_table_size;
+ return smu_table->power_play_table_size;
}
static int smu_sys_set_pp_table(void *handle,
@@ -521,13 +491,10 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- mutex_lock(&smu->mutex);
if (!smu_table->hardcode_pptable)
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
- if (!smu_table->hardcode_pptable) {
- ret = -ENOMEM;
- goto failed;
- }
+ if (!smu_table->hardcode_pptable)
+ return -ENOMEM;
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
@@ -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
smu->uploading_custom_pp_table = false;
-failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- mutex_init(&smu->mutex);
mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
@@ -736,8 +700,7 @@ static int smu_late_init(void *handle)
smu_handle_task(smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT,
- false);
+ AMD_PP_TASK_COMPLETE_INIT);
smu_restore_dpm_user_profile(smu);
@@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct work_struct *work)
struct smu_context *smu = container_of(work, struct smu_context,
interrupt_work);
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
smu->ppt_funcs->interrupt_work(smu);
-
- mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
@@ -1632,8 +1591,6 @@ static int smu_display_configuration_change(void *handle,
if (!display_config)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
smu_set_min_dcef_deep_sleep(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
@@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void *handle,
num_of_active_display++;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1766,22 +1721,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed)
+ enum amd_pp_task task_id)
{
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- if (lock_needed)
- mutex_lock(&smu->mutex);
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- goto out;
+ return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context *smu,
break;
}
-out:
- if (lock_needed)
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+ return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
@@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (!en) {
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
@@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void *handle,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, &workload, 0);
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- enum amd_dpm_forced_level level;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&(smu->mutex));
- level = smu_dpm_ctx->dpm_level;
- mutex_unlock(&(smu->mutex));
-
- return level;
+ return smu_dpm_ctx->dpm_level;
}
static int smu_force_performance_level(void *handle,
@@ -1880,19 +1818,12 @@ static int smu_force_performance_level(void *handle,
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu_enable_umd_pstate(smu, &level);
- if (ret) {
- mutex_unlock(&smu->mutex);
+ if (ret)
return ret;
- }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE,
- false);
-
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_READJUST_POWER_STATE);
/* reset user dpm clock state */
if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1906,16 +1837,11 @@ static int smu_force_performance_level(void *handle,
static int smu_set_display_count(void *handle, uint32_t count)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
- ret = smu_init_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_init_display_count(smu, count);
}
static int smu_force_smuclk_levels(struct smu_context *smu,
@@ -1933,8 +1859,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
return -EINVAL;
}
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { @@ -1943,8 +1867,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs &&
smu->ppt_funcs->set_mp1_state)
ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_df_cstate(smu, state);
if (ret)
dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
if (ret)
dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
-
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, NULL);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, NULL);
}
static int smu_set_watermarks_for_clock_ranges(void *handle,
					      struct pp_smu_wm_range_sets *clock_ranges)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2086,13 +1987,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
if (smu->disable_watermark)
return 0;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, clock_ranges);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, clock_ranges);
}
int smu_set_ac_dc(struct smu_context *smu)
@@ -2106,14 +2001,12 @@ int smu_set_ac_dc(struct smu_context *smu)
if (smu->dc_controlled_by_gpio)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_power_source(smu,
smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (ret)
dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
smu->adev->pm.ac_power ? "AC" : "DC");
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_gfx_cgpg)
ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
if (speed == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
@@ -2236,8 +2123,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
smu->user_dpm_profile.fan_speed_pwm = 0;
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
break;
}
- mutex_lock(&smu->mutex);
-
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
@@ -2328,8 +2211,6 @@ int smu_get_power_limit(void *handle,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- goto out;
- }
+ if (smu->ppt_funcs->set_power_limit)
+ return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!limit)
@@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
smu->user_dpm_profile.power_limit = limit;
}
-out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->print_clk_levels)
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->od_edit_dpm_table) {
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
size_val = *size_arg;
size = &size_val;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->read_sensor)
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
goto unlock;
@@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
}
unlock:
- mutex_unlock(&smu->mutex);
-
// assign uint32_t to int
*size_arg = size_val;
@@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle,
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->get_power_profile_mode)
@@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
if (!buf)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
- ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}
static int smu_set_power_profile_mode(void *handle,
@@ -2550,19 +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- smu_bump_power_profile_mode(smu, param, param_size);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_bump_power_profile_mode(smu, param, param_size);
}
@@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
if (!fan_mode)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
if (value == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
if (ret)
goto out;
@@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
}
out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
if (!speed)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
if (speed == U32_MAX)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
@@ -2674,8 +2507,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
smu->user_dpm_profile.fan_speed_rpm = 0;
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
if (!speed)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_min_dcef_deep_sleep(smu, clk);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_min_dcef_deep_sleep(smu, clk);
}
static int smu_get_clock_by_type_with_latency(void *handle,
@@ -2730,8 +2550,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_clock_by_type_with_latency) {
switch (type) {
case amd_pp_sys_clock:
@@ -2748,15 +2566,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
break;
default:
dev_err(smu->adev->dev, "Invalid clock type!\n");
- mutex_unlock(&smu->mutex);
return -EINVAL;
}
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2769,13 +2584,9 @@ static int smu_display_clock_voltage_request(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_clock_voltage_request)
ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2789,13 +2600,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_disable_memory_clock_switch)
ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_xgmi_pstate)
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
- mutex_unlock(&smu->mutex);
-
if(ret)
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
@@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
static int smu_get_baco_capability(void *handle, bool *cap)
{
struct smu_context *smu = handle;
- int ret = 0;
*cap = false;
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
*cap = smu->ppt_funcs->baco_is_support(smu);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return 0;
}
static int smu_baco_set_state(void *handle, int state)
@@ -2850,20 +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
return -EOPNOTSUPP;
if (state == 0) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_exit)
ret = smu->ppt_funcs->baco_exit(smu);
-
- mutex_unlock(&smu->mutex);
} else if (state == 1) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_enter)
ret = smu->ppt_funcs->baco_enter(smu);
-
- mutex_unlock(&smu->mutex);
-
} else {
return -EINVAL;
}
@@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
ret = smu->ppt_funcs->mode1_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
ret = smu->ppt_funcs->mode2_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode1_reset)
ret = smu->ppt_funcs->mode1_reset(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode2_reset)
ret = smu->ppt_funcs->mode2_reset(smu);
- mutex_unlock(&smu->mutex);
-
if (ret)
dev_err(smu->adev->dev, "Mode2 reset failed!\n");
@@ -2956,13 +2729,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_uclk_dpm_states)
ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2994,13 +2759,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_current_power_state)
pm_state = smu->ppt_funcs->get_current_power_state(smu);
- mutex_unlock(&smu->mutex);
-
return pm_state;
}
@@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_clock_table)
ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
struct smu_context *smu = handle;
- ssize_t size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
if (!smu->ppt_funcs->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu->ppt_funcs->get_gpu_metrics(smu, table);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
static int smu_enable_mgpu_fan_boost(void *handle)
@@ -3051,13 +2801,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->enable_mgpu_fan_boost)
ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->gfx_state_change_set)
ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->smu_handle_passthrough_sbr)
ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
int ret = -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs &&
smu->ppt_funcs->get_ecc_info)
ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
- mutex_unlock(&smu->mutex);
return ret;
@@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&smu->mutex);
if (memory_pool->bo) {
*addr = memory_pool->cpu_addr;
*size = memory_pool->size;
}
- mutex_unlock(&smu->mutex);
return 0;
}
@@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
{
int ret = -EINVAL;
- if (smu->ppt_funcs->wait_for_event) {
- mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->wait_for_event)
ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
- mutex_unlock(&smu->mutex);
- }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3fdab6a44901..00760f3c6da5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -488,7 +488,6 @@ struct smu_context
const struct cmn2asic_mapping *table_map;
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
- struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
struct mutex message_lock;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index d3963bfe5c89..addb0472d040 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 37e11716e919..fe17b3c1ece7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9766870987db..93caaf45a2db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ac8ba5e0e697..2546f79c8511 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&smu->mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&smu->mutex);
if (r)
goto fail;
--
2.29.0
^ permalink raw reply related [flat|nested] 14+ messages in thread
* RE: [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
2022-01-20 11:51 ` Quan, Evan
@ 2022-01-20 13:41 ` Chen, Guchun
0 siblings, 0 replies; 14+ messages in thread
From: Chen, Guchun @ 2022-01-20 13:41 UTC (permalink / raw)
To: Quan, Evan, amd-gfx; +Cc: Deucher, Alexander, Lazar, Lijo
[Public]
With the comment in patch 1 addressed, the series is:
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Regards,
Guchun
-----Original Message-----
From: Quan, Evan <Evan.Quan@amd.com>
Sent: Thursday, January 20, 2022 7:52 PM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo <Lijo.Lazar@amd.com>; Chen, Guchun <Guchun.Chen@amd.com>
Subject: RE: [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
[AMD Official Use Only]
Ping for the series..
> -----Original Message-----
> From: Quan, Evan <Evan.Quan@amd.com>
> Sent: Monday, January 17, 2022 1:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>; Chen, Guchun <Guchun.Chen@amd.com>; Quan, Evan
> <Evan.Quan@amd.com>
> Subject: [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock
>
> As all those related APIs are already well protected by adev->pm.mutex.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I36426791d3bbc9d84a6ae437da26a892682eb0cb
> ---
> .../gpu/drm/amd/pm/powerplay/amd_powerplay.c | 278 +++---------------
> drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 1 -
> 2 files changed, 38 insertions(+), 241 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index 76c26ae368f9..a2da46bf3985 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -50,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device
> *adev)
> hwmgr->adev = adev;
> hwmgr->not_vf = !amdgpu_sriov_vf(adev);
> hwmgr->device = amdgpu_cgs_create_device(adev);
> - mutex_init(&hwmgr->smu_lock);
> mutex_init(&hwmgr->msg_lock);
> hwmgr->chip_family = adev->family;
> hwmgr->chip_id = adev->asic_type;
> @@ -178,12 +177,9 @@ static int pp_late_init(void *handle)
> struct amdgpu_device *adev = handle;
> struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
>
> - if (hwmgr && hwmgr->pm_en) {
> - mutex_lock(&hwmgr->smu_lock);
> + if (hwmgr && hwmgr->pm_en)
> hwmgr_handle_task(hwmgr,
> AMD_PP_TASK_COMPLETE_INIT,
> NULL);
> - mutex_unlock(&hwmgr->smu_lock);
> - }
> if (adev->pm.smu_prv_buffer_size != 0)
> pp_reserve_vram_for_smu(adev);
>
> @@ -345,11 +341,9 @@ static int pp_dpm_force_performance_level(void
> *handle,
> if (level == hwmgr->dpm_level)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> pp_dpm_en_umd_pstate(hwmgr, &level);
> hwmgr->request_dpm_level = level;
> hwmgr_handle_task(hwmgr,
> AMD_PP_TASK_READJUST_POWER_STATE, NULL);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -358,21 +352,16 @@ static enum amd_dpm_forced_level
> pp_dpm_get_performance_level(
> void *handle)
> {
> struct pp_hwmgr *hwmgr = handle;
> - enum amd_dpm_forced_level level;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - level = hwmgr->dpm_level;
> - mutex_unlock(&hwmgr->smu_lock);
> - return level;
> + return hwmgr->dpm_level;
> }
>
> static uint32_t pp_dpm_get_sclk(void *handle, bool low) {
> struct pp_hwmgr *hwmgr = handle;
> - uint32_t clk = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return 0;
> @@ -381,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle,
> bool low)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
> - mutex_unlock(&hwmgr->smu_lock);
> - return clk;
> + return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
> }
>
> static uint32_t pp_dpm_get_mclk(void *handle, bool low) {
> struct pp_hwmgr *hwmgr = handle;
> - uint32_t clk = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return 0;
> @@ -399,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle,
> bool low)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
> - mutex_unlock(&hwmgr->smu_lock);
> - return clk;
> + return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
> }
>
> static void pp_dpm_powergate_vce(void *handle, bool gate) @@ -416,9
> +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return;
> }
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
> - mutex_unlock(&hwmgr->smu_lock);
> }
>
> static void pp_dpm_powergate_uvd(void *handle, bool gate) @@ -432,25
> +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return;
> }
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
> - mutex_unlock(&hwmgr->smu_lock);
> }
>
> static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
> enum amd_pm_state_type *user_state) {
> - int ret = 0;
> struct pp_hwmgr *hwmgr = handle;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr_handle_task(hwmgr, task_id, user_state);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr_handle_task(hwmgr, task_id, user_state);
> }
>
> static enum amd_pm_state_type pp_dpm_get_current_power_state(void
> *handle) @@ -462,8 +435,6 @@ static enum amd_pm_state_type
> pp_dpm_get_current_power_state(void *handle)
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> state = hwmgr->current_ps;
>
> switch (state->classification.ui_label) { @@ -483,7 +454,6 @@ static
> enum amd_pm_state_type pp_dpm_get_current_power_state(void
> *handle)
> pm_type = POWER_STATE_TYPE_DEFAULT;
> break;
> }
> - mutex_unlock(&hwmgr->smu_lock);
>
> return pm_type;
> }
> @@ -501,9 +471,7 @@ static int pp_dpm_set_fan_control_mode(void
> *handle, uint32_t mode)
> if (mode == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -521,16 +489,13 @@ static int pp_dpm_get_fan_control_mode(void
> *handle, uint32_t *fan_mode)
> if (!fan_mode)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> *fan_mode = hwmgr->hwmgr_func-
> >get_fan_control_mode(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -541,16 +506,12 @@ static int pp_dpm_set_fan_speed_pwm(void
> *handle, uint32_t speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
> }
>
> static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -561,16 +522,12 @@ static int pp_dpm_get_fan_speed_pwm(void
> *handle, uint32_t *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
> }
>
> static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -581,16 +538,12 @@ static int pp_dpm_get_fan_speed_rpm(void
> *handle, uint32_t *rpm)
> if (!rpm)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
> }
>
> static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EOPNOTSUPP;
> @@ -601,10 +554,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle,
> uint32_t rpm)
> if (rpm == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
> }
>
> static int pp_dpm_get_pp_num_states(void *handle, @@ -618,8 +568,6 @@
> static int pp_dpm_get_pp_num_states(void *handle,
> if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> data->nums = hwmgr->num_ps;
>
> for (i = 0; i < hwmgr->num_ps; i++) { @@ -642,23 +590,18 @@ static
> int pp_dpm_get_pp_num_states(void *handle,
> data->states[i] =
> POWER_STATE_TYPE_DEFAULT;
> }
> }
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_dpm_get_pp_table(void *handle, char **table) {
> struct pp_hwmgr *hwmgr = handle;
> - int size = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> *table = (char *)hwmgr->soft_pp_table;
> - size = hwmgr->soft_pp_table_size;
> - mutex_unlock(&hwmgr->smu_lock);
> - return size;
> + return hwmgr->soft_pp_table_size;
> }
>
> static int amd_powerplay_reset(void *handle) @@ -685,13 +628,12 @@
> static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> if (!hwmgr->hardcode_pp_table) {
> hwmgr->hardcode_pp_table = kmemdup(hwmgr-
> >soft_pp_table,
> hwmgr-
> >soft_pp_table_size,
> GFP_KERNEL);
> if (!hwmgr->hardcode_pp_table)
> - goto err;
> + return ret;
> }
>
> memcpy(hwmgr->hardcode_pp_table, buf, size); @@ -700,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
>
> ret = amd_powerplay_reset(handle);
> if (ret)
> - goto err;
> + return ret;
>
> - if (hwmgr->hwmgr_func->avfs_control) {
> + if (hwmgr->hwmgr_func->avfs_control)
> ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
> - if (ret)
> - goto err;
> - }
> - mutex_unlock(&hwmgr->smu_lock);
> - return 0;
> -err:
> - mutex_unlock(&hwmgr->smu_lock);
> +
> return ret;
> }
>
> @@ -718,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
> enum pp_clock_type type, uint32_t mask) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -733,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->force_clock_level(hwmgr, type,
> mask);
> }
>
> static int pp_dpm_print_clock_levels(void *handle,
> enum pp_clock_type type, char *buf) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -752,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
> }
>
> static int pp_dpm_get_sclk_od(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -770,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
> }
>
> static int pp_dpm_set_sclk_od(void *handle, uint32_t value) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -789,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle,
> uint32_t value)
> return 0;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
> }
>
> static int pp_dpm_get_mclk_od(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -807,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
> }
>
> static int pp_dpm_set_mclk_od(void *handle, uint32_t value) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -825,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle,
> uint32_t value)
> pr_info_ratelimited("%s was not implemented.\n", __func__);
> return 0;
> }
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
> }
>
> static int pp_dpm_read_sensor(void *handle, int idx,
> void *value, int *size)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !value)
> return -EINVAL;
> @@ -854,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
> *((uint32_t *)value) = hwmgr-
> >thermal_controller.fanInfo.ulMaxRPM;
> return 0;
> default:
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value,
> size);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->read_sensor(hwmgr, idx,
> value, size);
> }
> }
>
> @@ -877,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle,
> unsigned idx) static int pp_get_power_profile_mode(void *handle, char
> *buf) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret;
>
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func-
> >get_power_profile_mode)
> return -EOPNOTSUPP;
> if (!buf)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr,
> buf);
> }
>
> static int pp_set_power_profile_mode(void *handle, long *input,
> uint32_t
> size) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = -EOPNOTSUPP;
>
> if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func-
> >set_power_profile_mode)
> - return ret;
> + return -EOPNOTSUPP;
>
> if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> pr_debug("power profile setting is for manual dpm mode only.\n");
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> input, size);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> input, size);
> }
>
> static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type,
> long *input, uint32_t size) @@ -971,8 +871,6 @@ static int
> pp_dpm_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> if (!en) {
> hwmgr->workload_mask &= ~(1 << hwmgr-
> >workload_prority[type]);
> index = fls(hwmgr->workload_mask);
> @@ -987,15 +885,12 @@ static int pp_dpm_switch_power_profile(void
> *handle,
>
> if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
> hwmgr->hwmgr_func-
> >disable_power_features_for_compute_performance) {
> - if (hwmgr->hwmgr_func-
> >disable_power_features_for_compute_performance(hwmgr, en)) {
> - mutex_unlock(&hwmgr->smu_lock);
> + if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
> return -EINVAL;
> - }
> }
>
> if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> hwmgr->hwmgr_func->set_power_profile_mode(hwmgr,
> &workload, 0);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1025,10 +920,8 @@ static int pp_set_power_limit(void *handle,
> uint32_t limit)
> if (limit > max_power_limit)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
> hwmgr->power_limit = limit;
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1045,8 +938,6 @@ static int pp_get_power_limit(void *handle,
> uint32_t *limit,
> if (power_type != PP_PWR_TYPE_SUSTAINED)
> return -EOPNOTSUPP;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> switch (pp_limit_level) {
> case PP_PWR_LIMIT_CURRENT:
> *limit = hwmgr->power_limit;
> @@ -1066,8 +957,6 @@ static int pp_get_power_limit(void *handle,
> uint32_t *limit,
> break;
> }
>
> - mutex_unlock(&hwmgr->smu_lock);
> -
> return ret;
> }
>
> @@ -1079,9 +968,7 @@ static int pp_display_configuration_change(void
> *handle,
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> phm_store_dal_configuration_data(hwmgr, display_config);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1089,15 +976,11 @@ static int pp_get_display_power_level(void
> *handle,
> struct amd_pp_simple_clock_info *output) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!output)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_dal_power_level(hwmgr, output);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_dal_power_level(hwmgr, output);
> }
>
> static int pp_get_current_clocks(void *handle, @@ -1111,8 +994,6 @@
> static int pp_get_current_clocks(void *handle,
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> phm_get_dal_power_level(hwmgr, &simple_clocks);
>
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
> @@ -1125,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
>
> if (ret) {
> pr_debug("Error in phm_get_clock_info \n");
> - mutex_unlock(&hwmgr->smu_lock);
> return -EINVAL;
> }
>
> @@ -1148,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
> clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
> clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
> }
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type
> type, struct amd_pp_clocks *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1163,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle,
> enum amd_pp_clock_type type, struc
> if (clocks == NULL)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_clock_by_type(hwmgr, type, clocks);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type(hwmgr, type, clocks);
> }
>
> static int pp_get_clock_by_type_with_latency(void *handle, @@
> -1174,15
> +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
> struct pp_clock_levels_with_latency *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clocks)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> }
>
> static int pp_get_clock_by_type_with_voltage(void *handle, @@
> -1190,50
> +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
> struct pp_clock_levels_with_voltage *clocks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clocks)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> - ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> -
> - mutex_unlock(&hwmgr->smu_lock);
> - return ret;
> + return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> }
>
> static int pp_set_watermarks_for_clocks_ranges(void *handle,
> void *clock_ranges)
> {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
> - clock_ranges);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_set_watermarks_for_clocks_ranges(hwmgr,
> + clock_ranges);
> }
>
> static int pp_display_clock_voltage_request(void *handle,
> struct pp_display_clock_request *clock) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en ||!clock)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_display_clock_voltage_request(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_display_clock_voltage_request(hwmgr, clock);
> }
>
> static int pp_get_display_mode_validation_clocks(void *handle, @@ -
> 1247,12 +1102,9 @@ static int
> pp_get_display_mode_validation_clocks(void
> *handle,
>
> clocks->level = PP_DAL_POWERLEVEL_7;
>
> - mutex_lock(&hwmgr->smu_lock);
> -
> if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
> PHM_PlatformCaps_DynamicPatchPowerState))
> ret = phm_get_max_high_clocks(hwmgr, clocks);
>
> - mutex_unlock(&hwmgr->smu_lock);
> return ret;
> }
>
> @@ -1364,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void
> *handle)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1382,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void
> *handle)
> hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1401,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1420,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr,
> clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1439,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1449,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock) static int pp_set_active_display_count(void
> *handle, uint32_t count) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = phm_set_active_display_count(hwmgr, count);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return phm_set_active_display_count(hwmgr, count);
> }
>
> static int pp_get_asic_baco_capability(void *handle, bool *cap) @@ -1473,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
> !hwmgr->hwmgr_func->get_asic_baco_capability)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1490,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle,
> int
> *state)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE
> *)state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1508,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle,
> int
> state)
> !hwmgr->hwmgr_func->set_asic_baco_state)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum
> BACO_STATE)state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1518,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle,
> int
> state) static int pp_get_ppfeature_status(void *handle, char *buf) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en || !buf)
> return -EINVAL;
> @@ -1528,17 +1358,12 @@ static int pp_get_ppfeature_status(void
> *handle, char *buf)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
> }
>
> static int pp_set_ppfeature_status(void *handle, uint64_t
> ppfeature_masks) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1548,17 +1373,12 @@ static int pp_set_ppfeature_status(void
> *handle, uint64_t ppfeature_masks)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr,
> ppfeature_masks);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
> }
>
> static int pp_asic_reset_mode_2(void *handle) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1568,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->asic_reset(hwmgr,
> SMU_ASIC_RESET_MODE_2);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->asic_reset(hwmgr,
> SMU_ASIC_RESET_MODE_2);
> }
>
> static int pp_smu_i2c_bus_access(void *handle, bool acquire) {
> struct pp_hwmgr *hwmgr = handle;
> - int ret = 0;
>
> if (!hwmgr || !hwmgr->pm_en)
> return -EINVAL;
> @@ -1588,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle,
> bool acquire)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> - ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return ret;
> + return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
> }
>
> static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) @@
> -
> 1605,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum
> pp_df_cstate state)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1622,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
> return 0;
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> @@ -1632,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate) static ssize_t pp_get_gpu_metrics(void *handle, void
> **table) {
> struct pp_hwmgr *hwmgr = handle;
> - ssize_t size;
>
> if (!hwmgr)
> return -EINVAL;
> @@ -1640,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle,
> void **table)
> if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
> return -EOPNOTSUPP;
>
> - mutex_lock(&hwmgr->smu_lock);
> - size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> - mutex_unlock(&hwmgr->smu_lock);
> -
> - return size;
> + return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> }
>
> static int pp_gfx_state_change_set(void *handle, uint32_t state) @@ -
> 1659,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle,
> uint32_t state)
> return -EINVAL;
> }
>
> - mutex_lock(&hwmgr->smu_lock);
> hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
> - mutex_unlock(&hwmgr->smu_lock);
> return 0;
> }
>
> @@ -1675,12 +1475,10 @@ static int pp_get_prv_buffer_details(void
> *handle, void **addr, size_t *size)
>
> *addr = NULL;
> *size = 0;
> - mutex_lock(&hwmgr->smu_lock);
> if (adev->pm.smu_prv_buffer) {
> amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
> *size = adev->pm.smu_prv_buffer_size;
> }
> - mutex_unlock(&hwmgr->smu_lock);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> index 03226baea65e..4f7f2f455301 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> @@ -748,7 +748,6 @@ struct pp_hwmgr {
> bool not_vf;
> bool pm_en;
> bool pp_one_vf;
> - struct mutex smu_lock;
> struct mutex msg_lock;
>
> uint32_t pp_table_version;
> --
> 2.29.0
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (6 preceding siblings ...)
2022-01-20 13:37 ` [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Chen, Guchun
@ 2022-01-20 15:23 ` Lazar, Lijo
2022-01-21 7:08 ` Quan, Evan
2022-01-20 15:59 ` Lazar, Lijo
8 siblings, 1 reply; 14+ messages in thread
From: Lazar, Lijo @ 2022-01-20 15:23 UTC (permalink / raw)
To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, Guchun.Chen
On 1/17/2022 11:11 AM, Evan Quan wrote:
> As all those APIs are already protected either by adev->pm.mutex
> or smu->message_lock.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++----------------
> drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
> .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
> .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
> .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
> 6 files changed, 25 insertions(+), 299 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 828cb932f6a9..411f03eb4523 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> uint32_t mask);
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed);
> + enum amd_pp_task task_id);
> static int smu_reset(struct smu_context *smu);
> static int smu_set_fan_speed_pwm(void *handle, u32 speed);
> static int smu_set_fan_control_mode(void *handle, u32 value);
> @@ -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle,
> char *buf)
> {
> struct smu_context *smu = handle;
> - int size = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu_get_pp_feature_mask(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu_get_pp_feature_mask(smu, buf);
> }
>
> static int smu_sys_set_pp_feature_mask(void *handle,
> uint64_t new_mask)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_pp_feature_mask(smu, new_mask);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_pp_feature_mask(smu, new_mask);
> }
>
> int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
> @@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_soft_freq_limited_range)
> ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
> if (!min && !max)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_ultimate_freq)
> ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle,
> {
> struct smu_context *smu = handle;
> struct smu_table_context *smu_table = &smu->smu_table;
> - uint32_t powerplay_table_size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
> if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu_table->hardcode_pptable)
> *table = smu_table->hardcode_pptable;
> else
> *table = smu_table->power_play_table;
>
> - powerplay_table_size = smu_table->power_play_table_size;
> -
> - mutex_unlock(&smu->mutex);
> -
> - return powerplay_table_size;
> + return smu_table->power_play_table_size;
> }
>
> static int smu_sys_set_pp_table(void *handle,
> @@ -521,13 +491,10 @@ static int smu_sys_set_pp_table(void *handle,
> return -EIO;
> }
>
> - mutex_lock(&smu->mutex);
> if (!smu_table->hardcode_pptable)
> smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> - if (!smu_table->hardcode_pptable) {
> - ret = -ENOMEM;
> - goto failed;
> - }
> + if (!smu_table->hardcode_pptable)
> + return -ENOMEM;
>
> memcpy(smu_table->hardcode_pptable, buf, size);
> smu_table->power_play_table = smu_table->hardcode_pptable;
> @@ -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
>
> smu->uploading_custom_pp_table = false;
>
> -failed:
> - mutex_unlock(&smu->mutex);
> return ret;
> }
>
> @@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
> smu->adev = adev;
> smu->pm_enabled = !!amdgpu_dpm;
> smu->is_apu = false;
> - mutex_init(&smu->mutex);
> mutex_init(&smu->smu_baco.mutex);
> smu->smu_baco.state = SMU_BACO_STATE_EXIT;
> smu->smu_baco.platform_support = false;
> @@ -736,8 +700,7 @@ static int smu_late_init(void *handle)
>
> smu_handle_task(smu,
> smu->smu_dpm.dpm_level,
> - AMD_PP_TASK_COMPLETE_INIT,
> - false);
> + AMD_PP_TASK_COMPLETE_INIT);
>
> smu_restore_dpm_user_profile(smu);
>
> @@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct work_struct *work)
> struct smu_context *smu = container_of(work, struct smu_context,
> interrupt_work);
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
> smu->ppt_funcs->interrupt_work(smu);
> -
> - mutex_unlock(&smu->mutex);
> }
>
> static int smu_sw_init(void *handle)
> @@ -1632,8 +1591,6 @@ static int smu_display_configuration_change(void *handle,
> if (!display_config)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> smu_set_min_dcef_deep_sleep(smu,
> display_config->min_dcef_deep_sleep_set_clk / 100);
>
> @@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void *handle,
> num_of_active_display++;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1766,22 +1721,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
>
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed)
> + enum amd_pp_task task_id)
> {
> int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - if (lock_needed)
> - mutex_lock(&smu->mutex);
> -
> switch (task_id) {
> case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
> ret = smu_pre_display_config_changed(smu);
> if (ret)
> - goto out;
> + return ret;
> ret = smu_adjust_power_state_dynamic(smu, level, false);
> break;
> case AMD_PP_TASK_COMPLETE_INIT:
> @@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context *smu,
> break;
> }
>
> -out:
> - if (lock_needed)
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
>
> - return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
> + return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
>
> }
>
> @@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (!en) {
> smu->workload_mask &= ~(1 << smu->workload_prority[type]);
> index = fls(smu->workload_mask);
> @@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void *handle,
> smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> smu_bump_power_profile_mode(smu, &workload, 0);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
> {
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> - enum amd_dpm_forced_level level;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&(smu->mutex));
> - level = smu_dpm_ctx->dpm_level;
> - mutex_unlock(&(smu->mutex));
> -
> - return level;
> + return smu_dpm_ctx->dpm_level;
> }
>
> static int smu_force_performance_level(void *handle,
> @@ -1880,19 +1818,12 @@ static int smu_force_performance_level(void *handle,
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu_enable_umd_pstate(smu, &level);
> - if (ret) {
> - mutex_unlock(&smu->mutex);
> + if (ret)
> return ret;
> - }
>
> ret = smu_handle_task(smu, level,
> - AMD_PP_TASK_READJUST_POWER_STATE,
> - false);
> -
> - mutex_unlock(&smu->mutex);
> + AMD_PP_TASK_READJUST_POWER_STATE);
>
> /* reset user dpm clock state */
> if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> @@ -1906,16 +1837,11 @@ static int smu_force_performance_level(void *handle,
> static int smu_set_display_count(void *handle, uint32_t count)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> - ret = smu_init_display_count(smu, count);
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_init_display_count(smu, count);
> }
>
> static int smu_force_smuclk_levels(struct smu_context *smu,
> @@ -1933,8 +1859,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> return -EINVAL;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
> ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> @@ -1943,8 +1867,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs &&
> smu->ppt_funcs->set_mp1_state)
> ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
> if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_df_cstate(smu, state);
> if (ret)
> dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
> if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
> if (ret)
> dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> int smu_write_watermarks_table(struct smu_context *smu)
> {
> - int ret = 0;
> -
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, NULL);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, NULL);
> }
>
> static int smu_set_watermarks_for_clock_ranges(void *handle,
> struct pp_smu_wm_range_sets *clock_ranges)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -2086,13 +1987,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
> if (smu->disable_watermark)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, clock_ranges);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, clock_ranges);
> }
>
> int smu_set_ac_dc(struct smu_context *smu)
> @@ -2106,14 +2001,12 @@ int smu_set_ac_dc(struct smu_context *smu)
> if (smu->dc_controlled_by_gpio)
> return 0;
>
> - mutex_lock(&smu->mutex);
> ret = smu_set_power_source(smu,
> smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
> SMU_POWER_SOURCE_DC);
> if (ret)
> dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
> smu->adev->pm.ac_power ? "AC" : "DC");
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_gfx_cgpg)
> ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
> @@ -2236,8 +2123,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
> smu->user_dpm_profile.fan_speed_pwm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
> break;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
> if (smu->ppt_funcs->get_ppt_limit)
> ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
> @@ -2328,8 +2211,6 @@ int smu_get_power_limit(void *handle,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> limit &= (1<<24)-1;
> if (limit_type != SMU_DEFAULT_PPT_LIMIT)
> - if (smu->ppt_funcs->set_power_limit) {
> - ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
> - goto out;
> - }
> + if (smu->ppt_funcs->set_power_limit)
> + return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
>
> if (limit > smu->max_power_limit) {
> dev_err(smu->adev->dev,
> "New power limit (%d) is over the max allowed %d\n",
> limit, smu->max_power_limit);
> - ret = -EINVAL;
> - goto out;
> + return -EINVAL;
> }
>
> if (!limit)
> @@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
> smu->user_dpm_profile.power_limit = limit;
> }
>
> -out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->print_clk_levels)
> ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->od_edit_dpm_table) {
> ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
> size_val = *size_arg;
> size = &size_val;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->read_sensor)
> if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
> goto unlock;
> @@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
> }
>
> unlock:
> - mutex_unlock(&smu->mutex);
> -
> // assign uint32_t to int
> *size_arg = size_val;
>
> @@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle,
> static int smu_get_power_profile_mode(void *handle, char *buf)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->get_power_profile_mode)
> @@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
> if (!buf)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu->ppt_funcs->get_power_profile_mode(smu, buf);
> }
>
> static int smu_set_power_profile_mode(void *handle,
> @@ -2550,19 +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
> uint32_t param_size)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->set_power_profile_mode)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - smu_bump_power_profile_mode(smu, param, param_size);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_bump_power_profile_mode(smu, param, param_size);
> }
>
>
> @@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
> if (!fan_mode)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
> if (value == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
> if (ret)
> goto out;
> @@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
> }
>
> out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
> @@ -2674,8 +2507,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
> smu->user_dpm_profile.fan_speed_rpm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_min_dcef_deep_sleep(smu, clk);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_min_dcef_deep_sleep(smu, clk);
> }
>
> static int smu_get_clock_by_type_with_latency(void *handle,
> @@ -2730,8 +2550,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_clock_by_type_with_latency) {
> switch (type) {
> case amd_pp_sys_clock:
> @@ -2748,15 +2566,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
> break;
> default:
> dev_err(smu->adev->dev, "Invalid clock type!\n");
> - mutex_unlock(&smu->mutex);
> return -EINVAL;
> }
>
> ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2769,13 +2584,9 @@ static int smu_display_clock_voltage_request(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_clock_voltage_request)
> ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2789,13 +2600,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_disable_memory_clock_switch)
> ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_xgmi_pstate)
> ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
>
> - mutex_unlock(&smu->mutex);
> -
> if(ret)
> dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
>
> @@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
> static int smu_get_baco_capability(void *handle, bool *cap)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> *cap = false;
>
> if (!smu->pm_enabled)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
> *cap = smu->ppt_funcs->baco_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return 0;
> }
>
> static int smu_baco_set_state(void *handle, int state)
> @@ -2850,20 +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
> return -EOPNOTSUPP;
>
> if (state == 0) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_exit)
> ret = smu->ppt_funcs->baco_exit(smu);
> -
> - mutex_unlock(&smu->mutex);
> } else if (state == 1) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_enter)
> ret = smu->ppt_funcs->baco_enter(smu);
> -
> - mutex_unlock(&smu->mutex);
> -
> } else {
> return -EINVAL;
> }
> @@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
> ret = smu->ppt_funcs->mode1_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
> ret = smu->ppt_funcs->mode2_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode1_reset)
> ret = smu->ppt_funcs->mode1_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode2_reset)
> ret = smu->ppt_funcs->mode2_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> if (ret)
> dev_err(smu->adev->dev, "Mode2 reset failed!\n");
>
> @@ -2956,13 +2729,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
> ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_uclk_dpm_states)
> ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2994,13 +2759,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_current_power_state)
> pm_state = smu->ppt_funcs->get_current_power_state(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return pm_state;
> }
>
> @@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_clock_table)
> ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
> {
> struct smu_context *smu = handle;
> - ssize_t size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
> if (!smu->ppt_funcs->get_gpu_metrics)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu->ppt_funcs->get_gpu_metrics(smu, table);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu->ppt_funcs->get_gpu_metrics(smu, table);
> }
>
> static int smu_enable_mgpu_fan_boost(void *handle)
> @@ -3051,13 +2801,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->enable_mgpu_fan_boost)
> ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void *handle,
> struct smu_context *smu = handle;
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->gfx_state_change_set)
> ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->smu_handle_passthrough_sbr)
> ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
> {
> int ret = -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs &&
> smu->ppt_funcs->get_ecc_info)
> ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
> - mutex_unlock(&smu->mutex);
>
> return ret;
>
> @@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
>
> *addr = NULL;
> *size = 0;
> - mutex_lock(&smu->mutex);
> if (memory_pool->bo) {
> *addr = memory_pool->cpu_addr;
> *size = memory_pool->size;
> }
> - mutex_unlock(&smu->mutex);
>
> return 0;
> }
> @@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
> {
> int ret = -EINVAL;
>
> - if (smu->ppt_funcs->wait_for_event) {
> - mutex_lock(&smu->mutex);
> + if (smu->ppt_funcs->wait_for_event)
> ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
> - mutex_unlock(&smu->mutex);
> - }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 3fdab6a44901..00760f3c6da5 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -488,7 +488,6 @@ struct smu_context
> const struct cmn2asic_mapping *table_map;
> const struct cmn2asic_mapping *pwr_src_map;
> const struct cmn2asic_mapping *workload_map;
> - struct mutex mutex;
> struct mutex sensor_lock;
> struct mutex metrics_lock;
> struct mutex message_lock;
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index d3963bfe5c89..addb0472d040 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 37e11716e919..fe17b3c1ece7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 9766870987db..93caaf45a2db 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index ac8ba5e0e697..2546f79c8511 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
>
Could you check on i2c transfers? I don't see lock_ops implemented for
i2c control, and with this copy operation of table is not protected.
Thanks,
Lijo
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
` (7 preceding siblings ...)
2022-01-20 15:23 ` Lazar, Lijo
@ 2022-01-20 15:59 ` Lazar, Lijo
8 siblings, 0 replies; 14+ messages in thread
From: Lazar, Lijo @ 2022-01-20 15:59 UTC (permalink / raw)
To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, Guchun.Chen
Apart from patch 1, the rest of the series is
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Patch 1 needs another look regarding the i2c transfers.
Thanks,
Lijo
On 1/17/2022 11:11 AM, Evan Quan wrote:
> As all those APIs are already protected either by adev->pm.mutex
> or smu->message_lock.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++----------------
> drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
> .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
> .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
> .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
> 6 files changed, 25 insertions(+), 299 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 828cb932f6a9..411f03eb4523 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> uint32_t mask);
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed);
> + enum amd_pp_task task_id);
> static int smu_reset(struct smu_context *smu);
> static int smu_set_fan_speed_pwm(void *handle, u32 speed);
> static int smu_set_fan_control_mode(void *handle, u32 value);
> @@ -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle,
> char *buf)
> {
> struct smu_context *smu = handle;
> - int size = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu_get_pp_feature_mask(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu_get_pp_feature_mask(smu, buf);
> }
>
> static int smu_sys_set_pp_feature_mask(void *handle,
> uint64_t new_mask)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_pp_feature_mask(smu, new_mask);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_pp_feature_mask(smu, new_mask);
> }
>
> int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
> @@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_soft_freq_limited_range)
> ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
> if (!min && !max)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_ultimate_freq)
> ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle,
> {
> struct smu_context *smu = handle;
> struct smu_table_context *smu_table = &smu->smu_table;
> - uint32_t powerplay_table_size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
> if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu_table->hardcode_pptable)
> *table = smu_table->hardcode_pptable;
> else
> *table = smu_table->power_play_table;
>
> - powerplay_table_size = smu_table->power_play_table_size;
> -
> - mutex_unlock(&smu->mutex);
> -
> - return powerplay_table_size;
> + return smu_table->power_play_table_size;
> }
>
> static int smu_sys_set_pp_table(void *handle,
> @@ -521,13 +491,10 @@ static int smu_sys_set_pp_table(void *handle,
> return -EIO;
> }
>
> - mutex_lock(&smu->mutex);
> if (!smu_table->hardcode_pptable)
> smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> - if (!smu_table->hardcode_pptable) {
> - ret = -ENOMEM;
> - goto failed;
> - }
> + if (!smu_table->hardcode_pptable)
> + return -ENOMEM;
>
> memcpy(smu_table->hardcode_pptable, buf, size);
> smu_table->power_play_table = smu_table->hardcode_pptable;
> @@ -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
>
> smu->uploading_custom_pp_table = false;
>
> -failed:
> - mutex_unlock(&smu->mutex);
> return ret;
> }
>
> @@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
> smu->adev = adev;
> smu->pm_enabled = !!amdgpu_dpm;
> smu->is_apu = false;
> - mutex_init(&smu->mutex);
> mutex_init(&smu->smu_baco.mutex);
> smu->smu_baco.state = SMU_BACO_STATE_EXIT;
> smu->smu_baco.platform_support = false;
> @@ -736,8 +700,7 @@ static int smu_late_init(void *handle)
>
> smu_handle_task(smu,
> smu->smu_dpm.dpm_level,
> - AMD_PP_TASK_COMPLETE_INIT,
> - false);
> + AMD_PP_TASK_COMPLETE_INIT);
>
> smu_restore_dpm_user_profile(smu);
>
> @@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct work_struct *work)
> struct smu_context *smu = container_of(work, struct smu_context,
> interrupt_work);
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
> smu->ppt_funcs->interrupt_work(smu);
> -
> - mutex_unlock(&smu->mutex);
> }
>
> static int smu_sw_init(void *handle)
> @@ -1632,8 +1591,6 @@ static int smu_display_configuration_change(void *handle,
> if (!display_config)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> smu_set_min_dcef_deep_sleep(smu,
> display_config->min_dcef_deep_sleep_set_clk / 100);
>
> @@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void *handle,
> num_of_active_display++;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1766,22 +1721,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
>
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed)
> + enum amd_pp_task task_id)
> {
> int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - if (lock_needed)
> - mutex_lock(&smu->mutex);
> -
> switch (task_id) {
> case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
> ret = smu_pre_display_config_changed(smu);
> if (ret)
> - goto out;
> + return ret;
> ret = smu_adjust_power_state_dynamic(smu, level, false);
> break;
> case AMD_PP_TASK_COMPLETE_INIT:
> @@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context *smu,
> break;
> }
>
> -out:
> - if (lock_needed)
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
>
> - return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
> + return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
>
> }
>
> @@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (!en) {
> smu->workload_mask &= ~(1 << smu->workload_prority[type]);
> index = fls(smu->workload_mask);
> @@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void *handle,
> smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> smu_bump_power_profile_mode(smu, &workload, 0);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
> {
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> - enum amd_dpm_forced_level level;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&(smu->mutex));
> - level = smu_dpm_ctx->dpm_level;
> - mutex_unlock(&(smu->mutex));
> -
> - return level;
> + return smu_dpm_ctx->dpm_level;
> }
>
> static int smu_force_performance_level(void *handle,
> @@ -1880,19 +1818,12 @@ static int smu_force_performance_level(void *handle,
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu_enable_umd_pstate(smu, &level);
> - if (ret) {
> - mutex_unlock(&smu->mutex);
> + if (ret)
> return ret;
> - }
>
> ret = smu_handle_task(smu, level,
> - AMD_PP_TASK_READJUST_POWER_STATE,
> - false);
> -
> - mutex_unlock(&smu->mutex);
> + AMD_PP_TASK_READJUST_POWER_STATE);
>
> /* reset user dpm clock state */
> if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> @@ -1906,16 +1837,11 @@ static int smu_force_performance_level(void *handle,
> static int smu_set_display_count(void *handle, uint32_t count)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> - ret = smu_init_display_count(smu, count);
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_init_display_count(smu, count);
> }
>
> static int smu_force_smuclk_levels(struct smu_context *smu,
> @@ -1933,8 +1859,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> return -EINVAL;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
> ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> @@ -1943,8 +1867,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs &&
> smu->ppt_funcs->set_mp1_state)
> ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
> if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_df_cstate(smu, state);
> if (ret)
> dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
> if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
> if (ret)
> dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> int smu_write_watermarks_table(struct smu_context *smu)
> {
> - int ret = 0;
> -
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, NULL);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, NULL);
> }
>
> static int smu_set_watermarks_for_clock_ranges(void *handle,
> struct pp_smu_wm_range_sets *clock_ranges)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -2086,13 +1987,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
> if (smu->disable_watermark)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, clock_ranges);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, clock_ranges);
> }
>
> int smu_set_ac_dc(struct smu_context *smu)
> @@ -2106,14 +2001,12 @@ int smu_set_ac_dc(struct smu_context *smu)
> if (smu->dc_controlled_by_gpio)
> return 0;
>
> - mutex_lock(&smu->mutex);
> ret = smu_set_power_source(smu,
> smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
> SMU_POWER_SOURCE_DC);
> if (ret)
> dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
> smu->adev->pm.ac_power ? "AC" : "DC");
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_gfx_cgpg)
> ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
> @@ -2236,8 +2123,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
> smu->user_dpm_profile.fan_speed_pwm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
> break;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
> if (smu->ppt_funcs->get_ppt_limit)
> ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
> @@ -2328,8 +2211,6 @@ int smu_get_power_limit(void *handle,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> limit &= (1<<24)-1;
> if (limit_type != SMU_DEFAULT_PPT_LIMIT)
> - if (smu->ppt_funcs->set_power_limit) {
> - ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
> - goto out;
> - }
> + if (smu->ppt_funcs->set_power_limit)
> + return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
>
> if (limit > smu->max_power_limit) {
> dev_err(smu->adev->dev,
> "New power limit (%d) is over the max allowed %d\n",
> limit, smu->max_power_limit);
> - ret = -EINVAL;
> - goto out;
> + return -EINVAL;
> }
>
> if (!limit)
> @@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
> smu->user_dpm_profile.power_limit = limit;
> }
>
> -out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->print_clk_levels)
> ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->od_edit_dpm_table) {
> ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
> size_val = *size_arg;
> size = &size_val;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->read_sensor)
> if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
> goto unlock;
> @@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
> }
>
> unlock:
> - mutex_unlock(&smu->mutex);
> -
> // assign uint32_t to int
> *size_arg = size_val;
>
> @@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle,
> static int smu_get_power_profile_mode(void *handle, char *buf)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->get_power_profile_mode)
> @@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
> if (!buf)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu->ppt_funcs->get_power_profile_mode(smu, buf);
> }
>
> static int smu_set_power_profile_mode(void *handle,
> @@ -2550,19 +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
> uint32_t param_size)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->set_power_profile_mode)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - smu_bump_power_profile_mode(smu, param, param_size);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_bump_power_profile_mode(smu, param, param_size);
> }
>
>
> @@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
> if (!fan_mode)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
> if (value == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
> if (ret)
> goto out;
> @@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
> }
>
> out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
> @@ -2674,8 +2507,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
> smu->user_dpm_profile.fan_speed_rpm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_min_dcef_deep_sleep(smu, clk);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_min_dcef_deep_sleep(smu, clk);
> }
>
> static int smu_get_clock_by_type_with_latency(void *handle,
> @@ -2730,8 +2550,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_clock_by_type_with_latency) {
> switch (type) {
> case amd_pp_sys_clock:
> @@ -2748,15 +2566,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
> break;
> default:
> dev_err(smu->adev->dev, "Invalid clock type!\n");
> - mutex_unlock(&smu->mutex);
> return -EINVAL;
> }
>
> ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2769,13 +2584,9 @@ static int smu_display_clock_voltage_request(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_clock_voltage_request)
> ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2789,13 +2600,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_disable_memory_clock_switch)
> ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_xgmi_pstate)
> ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
>
> - mutex_unlock(&smu->mutex);
> -
> if(ret)
> dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
>
> @@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
> static int smu_get_baco_capability(void *handle, bool *cap)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> *cap = false;
>
> if (!smu->pm_enabled)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
> *cap = smu->ppt_funcs->baco_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return 0;
> }
>
> static int smu_baco_set_state(void *handle, int state)
> @@ -2850,20 +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
> return -EOPNOTSUPP;
>
> if (state == 0) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_exit)
> ret = smu->ppt_funcs->baco_exit(smu);
> -
> - mutex_unlock(&smu->mutex);
> } else if (state == 1) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_enter)
> ret = smu->ppt_funcs->baco_enter(smu);
> -
> - mutex_unlock(&smu->mutex);
> -
> } else {
> return -EINVAL;
> }
> @@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
> ret = smu->ppt_funcs->mode1_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
> ret = smu->ppt_funcs->mode2_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode1_reset)
> ret = smu->ppt_funcs->mode1_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode2_reset)
> ret = smu->ppt_funcs->mode2_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> if (ret)
> dev_err(smu->adev->dev, "Mode2 reset failed!\n");
>
> @@ -2956,13 +2729,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
> ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_uclk_dpm_states)
> ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2994,13 +2759,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_current_power_state)
> pm_state = smu->ppt_funcs->get_current_power_state(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return pm_state;
> }
>
> @@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_clock_table)
> ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
> {
> struct smu_context *smu = handle;
> - ssize_t size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
> if (!smu->ppt_funcs->get_gpu_metrics)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu->ppt_funcs->get_gpu_metrics(smu, table);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu->ppt_funcs->get_gpu_metrics(smu, table);
> }
>
> static int smu_enable_mgpu_fan_boost(void *handle)
> @@ -3051,13 +2801,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->enable_mgpu_fan_boost)
> ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void *handle,
> struct smu_context *smu = handle;
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->gfx_state_change_set)
> ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->smu_handle_passthrough_sbr)
> ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
> {
> int ret = -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs &&
> smu->ppt_funcs->get_ecc_info)
> ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
> - mutex_unlock(&smu->mutex);
>
> return ret;
>
> @@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
>
> *addr = NULL;
> *size = 0;
> - mutex_lock(&smu->mutex);
> if (memory_pool->bo) {
> *addr = memory_pool->cpu_addr;
> *size = memory_pool->size;
> }
> - mutex_unlock(&smu->mutex);
>
> return 0;
> }
> @@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
> {
> int ret = -EINVAL;
>
> - if (smu->ppt_funcs->wait_for_event) {
> - mutex_lock(&smu->mutex);
> + if (smu->ppt_funcs->wait_for_event)
> ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
> - mutex_unlock(&smu->mutex);
> - }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 3fdab6a44901..00760f3c6da5 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -488,7 +488,6 @@ struct smu_context
> const struct cmn2asic_mapping *table_map;
> const struct cmn2asic_mapping *pwr_src_map;
> const struct cmn2asic_mapping *workload_map;
> - struct mutex mutex;
> struct mutex sensor_lock;
> struct mutex metrics_lock;
> struct mutex message_lock;
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index d3963bfe5c89..addb0472d040 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 37e11716e919..fe17b3c1ece7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 9766870987db..93caaf45a2db 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index ac8ba5e0e697..2546f79c8511 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
>
^ permalink raw reply [flat|nested] 14+ messages in thread
* RE: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
2022-01-20 15:23 ` Lazar, Lijo
@ 2022-01-21 7:08 ` Quan, Evan
0 siblings, 0 replies; 14+ messages in thread
From: Quan, Evan @ 2022-01-21 7:08 UTC (permalink / raw)
To: Lazar, Lijo, amd-gfx; +Cc: Deucher, Alexander, Chen, Guchun
[AMD Official Use Only]
> -----Original Message-----
> From: Lazar, Lijo <Lijo.Lazar@amd.com>
> Sent: Thursday, January 20, 2022 11:23 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Chen, Guchun
> <Guchun.Chen@amd.com>
> Subject: Re: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection
> smu->mutex
>
>
>
> On 1/17/2022 11:11 AM, Evan Quan wrote:
> > As all those APIs are already protected either by adev->pm.mutex or
> > smu->message_lock.
> >
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
> > ---
> > drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++---------------
> -
> > drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
> > .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
> > .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
> > .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
> > .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
> > 6 files changed, 25 insertions(+), 299 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > index 828cb932f6a9..411f03eb4523 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > @@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct
> smu_context *smu,
> > uint32_t mask);
> > static int smu_handle_task(struct smu_context *smu,
> > enum amd_dpm_forced_level level,
> > - enum amd_pp_task task_id,
> > - bool lock_needed);
> > + enum amd_pp_task task_id);
> > static int smu_reset(struct smu_context *smu);
> > static int smu_set_fan_speed_pwm(void *handle, u32 speed);
> > static int smu_set_fan_control_mode(void *handle, u32 value); @@
> > -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle,
> > char *buf)
> > {
> > struct smu_context *smu = handle;
> > - int size = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - size = smu_get_pp_feature_mask(smu, buf);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return size;
> > + return smu_get_pp_feature_mask(smu, buf);
> > }
> >
> > static int smu_sys_set_pp_feature_mask(void *handle,
> > uint64_t new_mask)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - ret = smu_set_pp_feature_mask(smu, new_mask);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_set_pp_feature_mask(smu, new_mask);
> > }
> >
> > int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
> > @@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct
> smu_context *smu,
> > {
> > int ret = 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->set_soft_freq_limited_range)
> > ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
> > clk_type,
> > min,
> > max);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct
> smu_context *smu,
> > if (!min && !max)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_dpm_ultimate_freq)
> > ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
> > clk_type,
> > min,
> > max);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle,
> > {
> > struct smu_context *smu = handle;
> > struct smu_table_context *smu_table = &smu->smu_table;
> > - uint32_t powerplay_table_size;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> > @@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
> > if (!smu_table->power_play_table && !smu_table-
> >hardcode_pptable)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu_table->hardcode_pptable)
> > *table = smu_table->hardcode_pptable;
> > else
> > *table = smu_table->power_play_table;
> >
> > - powerplay_table_size = smu_table->power_play_table_size;
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return powerplay_table_size;
> > + return smu_table->power_play_table_size;
> > }
> >
> > static int smu_sys_set_pp_table(void *handle, @@ -521,13 +491,10 @@
> > static int smu_sys_set_pp_table(void *handle,
> > return -EIO;
> > }
> >
> > - mutex_lock(&smu->mutex);
> > if (!smu_table->hardcode_pptable)
> > smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> > - if (!smu_table->hardcode_pptable) {
> > - ret = -ENOMEM;
> > - goto failed;
> > - }
> > + if (!smu_table->hardcode_pptable)
> > + return -ENOMEM;
> >
> > memcpy(smu_table->hardcode_pptable, buf, size);
> > smu_table->power_play_table = smu_table->hardcode_pptable;
> @@
> > -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
> >
> > smu->uploading_custom_pp_table = false;
> >
> > -failed:
> > - mutex_unlock(&smu->mutex);
> > return ret;
> > }
> >
> > @@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
> > smu->adev = adev;
> > smu->pm_enabled = !!amdgpu_dpm;
> > smu->is_apu = false;
> > - mutex_init(&smu->mutex);
> > mutex_init(&smu->smu_baco.mutex);
> > smu->smu_baco.state = SMU_BACO_STATE_EXIT;
> > smu->smu_baco.platform_support = false; @@ -736,8 +700,7 @@
> static
> > int smu_late_init(void *handle)
> >
> > smu_handle_task(smu,
> > smu->smu_dpm.dpm_level,
> > - AMD_PP_TASK_COMPLETE_INIT,
> > - false);
> > + AMD_PP_TASK_COMPLETE_INIT);
> >
> > smu_restore_dpm_user_profile(smu);
> >
> > @@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct
> work_struct *work)
> > struct smu_context *smu = container_of(work, struct smu_context,
> > interrupt_work);
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
> > smu->ppt_funcs->interrupt_work(smu);
> > -
> > - mutex_unlock(&smu->mutex);
> > }
> >
> > static int smu_sw_init(void *handle) @@ -1632,8 +1591,6 @@ static
> > int smu_display_configuration_change(void *handle,
> > if (!display_config)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > smu_set_min_dcef_deep_sleep(smu,
> > display_config-
> >min_dcef_deep_sleep_set_clk / 100);
> >
> > @@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void
> *handle,
> > num_of_active_display++;
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return 0;
> > }
> >
> > @@ -1766,22 +1721,18 @@ static int
> > smu_adjust_power_state_dynamic(struct smu_context *smu,
> >
> > static int smu_handle_task(struct smu_context *smu,
> > enum amd_dpm_forced_level level,
> > - enum amd_pp_task task_id,
> > - bool lock_needed)
> > + enum amd_pp_task task_id)
> > {
> > int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - if (lock_needed)
> > - mutex_lock(&smu->mutex);
> > -
> > switch (task_id) {
> > case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
> > ret = smu_pre_display_config_changed(smu);
> > if (ret)
> > - goto out;
> > + return ret;
> > ret = smu_adjust_power_state_dynamic(smu, level, false);
> > break;
> > case AMD_PP_TASK_COMPLETE_INIT:
> > @@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context
> *smu,
> > break;
> > }
> >
> > -out:
> > - if (lock_needed)
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
> > struct smu_context *smu = handle;
> > struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
> >
> > - return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
> > + return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
> >
> > }
> >
> > @@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void
> *handle,
> > if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (!en) {
> > smu->workload_mask &= ~(1 << smu-
> >workload_prority[type]);
> > index = fls(smu->workload_mask);
> > @@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void
> *handle,
> > smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> > smu_bump_power_profile_mode(smu, &workload, 0);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return 0;
> > }
> >
> > @@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle)
> > {
> > struct smu_context *smu = handle;
> > struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> > - enum amd_dpm_forced_level level;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> > @@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle)
> > if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> > return -EINVAL;
> >
> > - mutex_lock(&(smu->mutex));
> > - level = smu_dpm_ctx->dpm_level;
> > - mutex_unlock(&(smu->mutex));
> > -
> > - return level;
> > + return smu_dpm_ctx->dpm_level;
> > }
> >
> > static int smu_force_performance_level(void *handle, @@ -1880,19
> > +1818,12 @@ static int smu_force_performance_level(void *handle,
> > if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu_enable_umd_pstate(smu, &level);
> > - if (ret) {
> > - mutex_unlock(&smu->mutex);
> > + if (ret)
> > return ret;
> > - }
> >
> > ret = smu_handle_task(smu, level,
> > - AMD_PP_TASK_READJUST_POWER_STATE,
> > - false);
> > -
> > - mutex_unlock(&smu->mutex);
> > + AMD_PP_TASK_READJUST_POWER_STATE);
> >
> > /* reset user dpm clock state */
> > if (!ret && smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_MANUAL)
> > { @@ -1906,16 +1837,11 @@ static int smu_force_performance_level(void
> *handle,
> > static int smu_set_display_count(void *handle, uint32_t count)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > - ret = smu_init_display_count(smu, count);
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_init_display_count(smu, count);
> > }
> >
> > static int smu_force_smuclk_levels(struct smu_context *smu, @@
> > -1933,8 +1859,6 @@ static int smu_force_smuclk_levels(struct
> smu_context *smu,
> > return -EINVAL;
> > }
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
> > ret = smu->ppt_funcs->force_clk_levels(smu, clk_type,
> mask);
> > if (!ret && !(smu->user_dpm_profile.flags &
> > SMU_DPM_USER_PROFILE_RESTORE)) { @@ -1943,8 +1867,6 @@ static int
> smu_force_smuclk_levels(struct smu_context *smu,
> > }
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
> > if (!smu->pm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs &&
> > smu->ppt_funcs->set_mp1_state)
> > ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
> > if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
> > return 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->set_df_cstate(smu, state);
> > if (ret)
> > dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
> > if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
> > return 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
> > if (ret)
> > dev_err(smu->adev->dev, "[AllowXgmiPowerDown]
> failed!\n");
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > int smu_write_watermarks_table(struct smu_context *smu)
> > {
> > - int ret = 0;
> > -
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - ret = smu_set_watermarks_table(smu, NULL);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_set_watermarks_table(smu, NULL);
> > }
> >
> > static int smu_set_watermarks_for_clock_ranges(void *handle,
> > struct pp_smu_wm_range_sets
> *clock_ranges)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> > @@ -2086,13 +1987,7 @@ static int
> smu_set_watermarks_for_clock_ranges(void *handle,
> > if (smu->disable_watermark)
> > return 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - ret = smu_set_watermarks_table(smu, clock_ranges);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_set_watermarks_table(smu, clock_ranges);
> > }
> >
> > int smu_set_ac_dc(struct smu_context *smu) @@ -2106,14 +2001,12 @@
> > int smu_set_ac_dc(struct smu_context *smu)
> > if (smu->dc_controlled_by_gpio)
> > return 0;
> >
> > - mutex_lock(&smu->mutex);
> > ret = smu_set_power_source(smu,
> > smu->adev->pm.ac_power ?
> SMU_POWER_SOURCE_AC :
> > SMU_POWER_SOURCE_DC);
> > if (ret)
> > dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
> > smu->adev->pm.ac_power ? "AC" : "DC");
> > - mutex_unlock(&smu->mutex);
> >
> > return ret;
> > }
> > @@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context
> *smu, bool enabled)
> > {
> > int ret = 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->set_gfx_cgpg)
> > ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle,
> uint32_t speed)
> > if (speed == U32_MAX)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
> > if (!ret && !(smu->user_dpm_profile.flags &
> SMU_DPM_USER_PROFILE_RESTORE)) {
> > smu->user_dpm_profile.flags |=
> SMU_CUSTOM_FAN_SPEED_RPM; @@
> > -2236,8 +2123,6 @@ static int smu_set_fan_speed_rpm(void *handle,
> uint32_t speed)
> > smu->user_dpm_profile.fan_speed_pwm = 0;
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
> > break;
> > }
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
> > if (smu->ppt_funcs->get_ppt_limit)
> > ret = smu->ppt_funcs->get_ppt_limit(smu, limit,
> limit_type,
> > limit_level); @@ -2328,8 +2211,6 @@ int smu_get_power_limit(void
> *handle,
> > }
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > limit &= (1<<24)-1;
> > if (limit_type != SMU_DEFAULT_PPT_LIMIT)
> > - if (smu->ppt_funcs->set_power_limit) {
> > - ret = smu->ppt_funcs->set_power_limit(smu,
> limit_type, limit);
> > - goto out;
> > - }
> > + if (smu->ppt_funcs->set_power_limit)
> > + return smu->ppt_funcs->set_power_limit(smu,
> limit_type, limit);
> >
> > if (limit > smu->max_power_limit) {
> > dev_err(smu->adev->dev,
> > "New power limit (%d) is over the max
> allowed %d\n",
> > limit, smu->max_power_limit);
> > - ret = -EINVAL;
> > - goto out;
> > + return -EINVAL;
> > }
> >
> > if (!limit)
> > @@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
> > smu->user_dpm_profile.power_limit = limit;
> > }
> >
> > -out:
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct
> smu_context *smu, enum smu_clk_type cl
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->print_clk_levels)
> > ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void
> *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->od_edit_dpm_table) {
> > ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input,
> size);
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
> > size_val = *size_arg;
> > size = &size_val;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->read_sensor)
> > if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
> > goto unlock;
> > @@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
> > }
> >
> > unlock:
> > - mutex_unlock(&smu->mutex);
> > -
> > // assign uint32_t to int
> > *size_arg = size_val;
> >
> > @@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle,
> > static int smu_get_power_profile_mode(void *handle, char *buf)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> > !smu->ppt_funcs->get_power_profile_mode)
> > @@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void
> *handle, char *buf)
> > if (!buf)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu->ppt_funcs->get_power_profile_mode(smu, buf);
> > }
> >
> > static int smu_set_power_profile_mode(void *handle, @@ -2550,19
> > +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
> > uint32_t param_size)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> > !smu->ppt_funcs->set_power_profile_mode)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - smu_bump_power_profile_mode(smu, param, param_size);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_bump_power_profile_mode(smu, param, param_size);
> > }
> >
> >
> > @@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void
> *handle, u32 *fan_mode)
> > if (!fan_mode)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return 0;
> > }
> >
> > @@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void
> *handle, u32 value)
> > if (value == U32_MAX)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
> > if (ret)
> > goto out;
> > @@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void
> *handle, u32 value)
> > }
> >
> > out:
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void
> *handle, u32 *speed)
> > if (!speed)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle,
> u32 speed)
> > if (speed == U32_MAX)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
> > if (!ret && !(smu->user_dpm_profile.flags &
> SMU_DPM_USER_PROFILE_RESTORE)) {
> > smu->user_dpm_profile.flags |=
> SMU_CUSTOM_FAN_SPEED_PWM; @@
> > -2674,8 +2507,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32
> speed)
> > smu->user_dpm_profile.fan_speed_rpm = 0;
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void
> *handle, uint32_t *speed)
> > if (!speed)
> > return -EINVAL;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - ret = smu_set_min_dcef_deep_sleep(smu, clk);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return smu_set_min_dcef_deep_sleep(smu, clk);
> > }
> >
> > static int smu_get_clock_by_type_with_latency(void *handle, @@
> > -2730,8 +2550,6 @@ static int smu_get_clock_by_type_with_latency(void
> *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_clock_by_type_with_latency) {
> > switch (type) {
> > case amd_pp_sys_clock:
> > @@ -2748,15 +2566,12 @@ static int
> smu_get_clock_by_type_with_latency(void *handle,
> > break;
> > default:
> > dev_err(smu->adev->dev, "Invalid clock type!\n");
> > - mutex_unlock(&smu->mutex);
> > return -EINVAL;
> > }
> >
> > ret = smu->ppt_funcs-
> >get_clock_by_type_with_latency(smu, clk_type, clocks);
> > }
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2769,13 +2584,9 @@ static int
> smu_display_clock_voltage_request(void *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->display_clock_voltage_request)
> > ret = smu->ppt_funcs->display_clock_voltage_request(smu,
> > clock_req);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2789,13 +2600,9 @@ static int
> smu_display_disable_memory_clock_switch(void *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->display_disable_memory_clock_switch)
> > ret = smu->ppt_funcs-
> >display_disable_memory_clock_switch(smu,
> > disable_memory_clock_switch);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->set_xgmi_pstate)
> > ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > if(ret)
> > dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
> >
> > @@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
> > static int smu_get_baco_capability(void *handle, bool *cap)
> > {
> > struct smu_context *smu = handle;
> > - int ret = 0;
> >
> > *cap = false;
> >
> > if (!smu->pm_enabled)
> > return 0;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
> > *cap = smu->ppt_funcs->baco_is_support(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > - return ret;
> > + return 0;
> > }
> >
> > static int smu_baco_set_state(void *handle, int state) @@ -2850,20
> > +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
> > return -EOPNOTSUPP;
> >
> > if (state == 0) {
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->baco_exit)
> > ret = smu->ppt_funcs->baco_exit(smu);
> > -
> > - mutex_unlock(&smu->mutex);
> > } else if (state == 1) {
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->baco_enter)
> > ret = smu->ppt_funcs->baco_enter(smu);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > } else {
> > return -EINVAL;
> > }
> > @@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct
> smu_context *smu)
> > if (!smu->pm_enabled)
> > return false;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
> > ret = smu->ppt_funcs->mode1_reset_is_support(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct
> smu_context *smu)
> > if (!smu->pm_enabled)
> > return false;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
> > ret = smu->ppt_funcs->mode2_reset_is_support(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
> > if (!smu->pm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->mode1_reset)
> > ret = smu->ppt_funcs->mode1_reset(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
> > if (!smu->pm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->mode2_reset)
> > ret = smu->ppt_funcs->mode2_reset(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > if (ret)
> > dev_err(smu->adev->dev, "Mode2 reset failed!\n");
> >
> > @@ -2956,13 +2729,9 @@ static int
> smu_get_max_sustainable_clocks_by_dc(void *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
> > ret = smu->ppt_funcs-
> >get_max_sustainable_clocks_by_dc(smu,
> > max_clocks);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void
> *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_uclk_dpm_states)
> > ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
> > clock_values_in_khz, num_states);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -2994,13 +2759,9 @@ static enum amd_pm_state_type
> smu_get_current_power_state(void *handle)
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_current_power_state)
> > pm_state = smu->ppt_funcs-
> >get_current_power_state(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return pm_state;
> > }
> >
> > @@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void
> *handle,
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->get_dpm_clock_table)
> > ret = smu->ppt_funcs->get_dpm_clock_table(smu,
> clock_table);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
> > {
> > struct smu_context *smu = handle;
> > - ssize_t size;
> >
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> > @@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void
> *handle, void **table)
> > if (!smu->ppt_funcs->get_gpu_metrics)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > - size = smu->ppt_funcs->get_gpu_metrics(smu, table);
> > -
> > - mutex_unlock(&smu->mutex);
> > -
> > - return size;
> > + return smu->ppt_funcs->get_gpu_metrics(smu, table);
> > }
> >
> > static int smu_enable_mgpu_fan_boost(void *handle) @@ -3051,13
> > +2801,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
> > if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> > return -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > -
> > if (smu->ppt_funcs->enable_mgpu_fan_boost)
> > ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
> >
> > - mutex_unlock(&smu->mutex);
> > -
> > return ret;
> > }
> >
> > @@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void
> *handle,
> > struct smu_context *smu = handle;
> > int ret = 0;
> >
> > - mutex_lock(&smu->mutex);
> > if (smu->ppt_funcs->gfx_state_change_set)
> > ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
> > - mutex_unlock(&smu->mutex);
> >
> > return ret;
> > }
> > @@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct
> smu_context *smu, bool enable)
> > {
> > int ret = 0;
> >
> > - mutex_lock(&smu->mutex);
> > if (smu->ppt_funcs->smu_handle_passthrough_sbr)
> > ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu,
> enable);
> > - mutex_unlock(&smu->mutex);
> >
> > return ret;
> > }
> > @@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu,
> void *umc_ecc)
> > {
> > int ret = -EOPNOTSUPP;
> >
> > - mutex_lock(&smu->mutex);
> > if (smu->ppt_funcs &&
> > smu->ppt_funcs->get_ecc_info)
> > ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
> > - mutex_unlock(&smu->mutex);
> >
> > return ret;
> >
> > @@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void
> > *handle, void **addr, size_t *size)
> >
> > *addr = NULL;
> > *size = 0;
> > - mutex_lock(&smu->mutex);
> > if (memory_pool->bo) {
> > *addr = memory_pool->cpu_addr;
> > *size = memory_pool->size;
> > }
> > - mutex_unlock(&smu->mutex);
> >
> > return 0;
> > }
> > @@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context
> *smu, enum smu_event_type event,
> > {
> > int ret = -EINVAL;
> >
> > - if (smu->ppt_funcs->wait_for_event) {
> > - mutex_lock(&smu->mutex);
> > + if (smu->ppt_funcs->wait_for_event)
> > ret = smu->ppt_funcs->wait_for_event(smu, event,
> event_arg);
> > - mutex_unlock(&smu->mutex);
> > - }
> >
> > return ret;
> > }
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > index 3fdab6a44901..00760f3c6da5 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > @@ -488,7 +488,6 @@ struct smu_context
> > const struct cmn2asic_mapping *table_map;
> > const struct cmn2asic_mapping *pwr_src_map;
> > const struct cmn2asic_mapping *workload_map;
> > - struct mutex mutex;
> > struct mutex sensor_lock;
> > struct mutex metrics_lock;
> > struct mutex message_lock;
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> > b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> > index d3963bfe5c89..addb0472d040 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> > @@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> > }
> > }
> > }
> > - mutex_lock(&smu->mutex);
> > r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> > - mutex_unlock(&smu->mutex);
> > if (r)
> > goto fail;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> > b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> > index 37e11716e919..fe17b3c1ece7 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> > @@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> > }
> > }
> > }
> > - mutex_lock(&smu->mutex);
> > r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> > - mutex_unlock(&smu->mutex);
> > if (r)
> > goto fail;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > index 9766870987db..93caaf45a2db 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > @@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct
> i2c_adapter *i2c_adap,
> > }
> > }
> > }
> > - mutex_lock(&smu->mutex);
> > r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> > - mutex_unlock(&smu->mutex);
> > if (r)
> > goto fail;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> > b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> > index ac8ba5e0e697..2546f79c8511 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> > @@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> > }
> > }
> > }
> > - mutex_lock(&smu->mutex);
> > r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> > - mutex_unlock(&smu->mutex);
> > if (r)
> > goto fail;
> >
> >
>
> Could you check on the i2c transfers? I don't see lock_ops implemented for the i2c
> control, and with this change the copy operation of the table is not protected.
[Quan, Evan] Hmm, maybe adev->pm.mutex should be used here. Please check V3.
BR
Evan
>
> Thanks,
> Lijo
^ permalink raw reply [flat|nested] 14+ messages in thread
* RE: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex
2022-01-20 13:37 ` [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Chen, Guchun
@ 2022-01-21 7:09 ` Quan, Evan
0 siblings, 0 replies; 14+ messages in thread
From: Quan, Evan @ 2022-01-21 7:09 UTC (permalink / raw)
To: Chen, Guchun, amd-gfx; +Cc: Deucher, Alexander, Lazar, Lijo
[Public]
> -----Original Message-----
> From: Chen, Guchun <Guchun.Chen@amd.com>
> Sent: Thursday, January 20, 2022 9:38 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>
> Subject: RE: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection
> smu->mutex
>
> [Public]
>
> if (!smu_table->hardcode_pptable)
> smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> - if (!smu_table->hardcode_pptable) {
> - ret = -ENOMEM;
> - goto failed;
> - }
> + if (!smu_table->hardcode_pptable)
> + return -ENOMEM;
>
> I guess it's better to put the second check of hardcode_pptable into first if
> condition section like:
> if (!smu_table->hardcode_pptable) {
> smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> if (!smu_table->hardcode_pptable)
> return -ENOMEM;
> }
[Quan, Evan] Thanks! Fixed in V3.
BR
Evan
>
>
> Regards,
> Guchun
>
> -----Original Message-----
> From: Quan, Evan <Evan.Quan@amd.com>
> Sent: Monday, January 17, 2022 1:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>; Chen, Guchun <Guchun.Chen@amd.com>; Quan,
> Evan <Evan.Quan@amd.com>
> Subject: [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu-
> >mutex
>
> As all those APIs are already protected either by adev->pm.mutex or smu-
> >message_lock.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I1db751fba9caabc5ca1314992961d3674212f9b0
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 315 ++----------------
> drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 -
> .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
> .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
> .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -
> 6 files changed, 25 insertions(+), 299 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 828cb932f6a9..411f03eb4523 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context
> *smu,
> uint32_t mask);
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed);
> + enum amd_pp_task task_id);
> static int smu_reset(struct smu_context *smu); static int
> smu_set_fan_speed_pwm(void *handle, u32 speed); static int
> smu_set_fan_control_mode(void *handle, u32 value); @@ -68,36 +67,22
> @@ static int smu_sys_get_pp_feature_mask(void *handle,
> char *buf)
> {
> struct smu_context *smu = handle;
> - int size = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu_get_pp_feature_mask(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu_get_pp_feature_mask(smu, buf);
> }
>
> static int smu_sys_set_pp_feature_mask(void *handle,
> uint64_t new_mask)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_pp_feature_mask(smu, new_mask);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_pp_feature_mask(smu, new_mask);
> }
>
> int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) @@
> -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
> {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_soft_freq_limited_range)
> ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context
> *smu,
> if (!min && !max)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_ultimate_freq)
> ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
> clk_type,
> min,
> max);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle, {
> struct smu_context *smu = handle;
> struct smu_table_context *smu_table = &smu->smu_table;
> - uint32_t powerplay_table_size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle,
> if (!smu_table->power_play_table && !smu_table-
> >hardcode_pptable)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu_table->hardcode_pptable)
> *table = smu_table->hardcode_pptable;
> else
> *table = smu_table->power_play_table;
>
> - powerplay_table_size = smu_table->power_play_table_size;
> -
> - mutex_unlock(&smu->mutex);
> -
> - return powerplay_table_size;
> + return smu_table->power_play_table_size;
> }
>
> static int smu_sys_set_pp_table(void *handle, @@ -521,13 +491,10 @@
> static int smu_sys_set_pp_table(void *handle,
> return -EIO;
> }
>
> - mutex_lock(&smu->mutex);
> if (!smu_table->hardcode_pptable)
> smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
> - if (!smu_table->hardcode_pptable) {
> - ret = -ENOMEM;
> - goto failed;
> - }
> + if (!smu_table->hardcode_pptable)
> + return -ENOMEM;
>
> memcpy(smu_table->hardcode_pptable, buf, size);
> smu_table->power_play_table = smu_table->hardcode_pptable;
> @@ -545,8 +512,6 @@ static int smu_sys_set_pp_table(void *handle,
>
> smu->uploading_custom_pp_table = false;
>
> -failed:
> - mutex_unlock(&smu->mutex);
> return ret;
> }
>
> @@ -633,7 +598,6 @@ static int smu_early_init(void *handle)
> smu->adev = adev;
> smu->pm_enabled = !!amdgpu_dpm;
> smu->is_apu = false;
> - mutex_init(&smu->mutex);
> mutex_init(&smu->smu_baco.mutex);
> smu->smu_baco.state = SMU_BACO_STATE_EXIT;
> smu->smu_baco.platform_support = false; @@ -736,8 +700,7 @@
> static int smu_late_init(void *handle)
>
> smu_handle_task(smu,
> smu->smu_dpm.dpm_level,
> - AMD_PP_TASK_COMPLETE_INIT,
> - false);
> + AMD_PP_TASK_COMPLETE_INIT);
>
> smu_restore_dpm_user_profile(smu);
>
> @@ -1013,12 +976,8 @@ static void smu_interrupt_work_fn(struct
> work_struct *work)
> struct smu_context *smu = container_of(work, struct smu_context,
> interrupt_work);
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
> smu->ppt_funcs->interrupt_work(smu);
> -
> - mutex_unlock(&smu->mutex);
> }
>
> static int smu_sw_init(void *handle)
> @@ -1632,8 +1591,6 @@ static int smu_display_configuration_change(void
> *handle,
> if (!display_config)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> smu_set_min_dcef_deep_sleep(smu,
> display_config-
> >min_dcef_deep_sleep_set_clk / 100);
>
> @@ -1642,8 +1599,6 @@ static int smu_display_configuration_change(void
> *handle,
> num_of_active_display++;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1766,22 +1721,18 @@ static int
> smu_adjust_power_state_dynamic(struct smu_context *smu,
>
> static int smu_handle_task(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - enum amd_pp_task task_id,
> - bool lock_needed)
> + enum amd_pp_task task_id)
> {
> int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - if (lock_needed)
> - mutex_lock(&smu->mutex);
> -
> switch (task_id) {
> case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
> ret = smu_pre_display_config_changed(smu);
> if (ret)
> - goto out;
> + return ret;
> ret = smu_adjust_power_state_dynamic(smu, level, false);
> break;
> case AMD_PP_TASK_COMPLETE_INIT:
> @@ -1792,10 +1743,6 @@ static int smu_handle_task(struct smu_context
> *smu,
> break;
> }
>
> -out:
> - if (lock_needed)
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -1806,7 +1753,7 @@ static int smu_handle_dpm_task(void *handle,
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
>
> - return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
> + return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
>
> }
>
> @@ -1825,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> if (!en) {
> smu->workload_mask &= ~(1 << smu-
> >workload_prority[type]);
> index = fls(smu->workload_mask);
> @@ -1843,8 +1788,6 @@ static int smu_switch_power_profile(void *handle,
> smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> smu_bump_power_profile_mode(smu, &workload, 0);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -1852,7 +1795,6 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle) {
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> - enum amd_dpm_forced_level level;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -1860,11 +1802,7 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle)
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&(smu->mutex));
> - level = smu_dpm_ctx->dpm_level;
> - mutex_unlock(&(smu->mutex));
> -
> - return level;
> + return smu_dpm_ctx->dpm_level;
> }
>
> static int smu_force_performance_level(void *handle, @@ -1880,19
> +1818,12 @@ static int smu_force_performance_level(void *handle,
> if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu_enable_umd_pstate(smu, &level);
> - if (ret) {
> - mutex_unlock(&smu->mutex);
> + if (ret)
> return ret;
> - }
>
> ret = smu_handle_task(smu, level,
> - AMD_PP_TASK_READJUST_POWER_STATE,
> - false);
> -
> - mutex_unlock(&smu->mutex);
> + AMD_PP_TASK_READJUST_POWER_STATE);
>
> /* reset user dpm clock state */
> if (!ret && smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_MANUAL) { @@ -1906,16 +1837,11 @@ static int
> smu_force_performance_level(void *handle, static int
> smu_set_display_count(void *handle, uint32_t count) {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> - ret = smu_init_display_count(smu, count);
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_init_display_count(smu, count);
> }
>
> static int smu_force_smuclk_levels(struct smu_context *smu, @@ -1933,8
> +1859,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
> return -EINVAL;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
> ret = smu->ppt_funcs->force_clk_levels(smu, clk_type,
> mask);
> if (!ret && !(smu->user_dpm_profile.flags &
> SMU_DPM_USER_PROFILE_RESTORE)) { @@ -1943,8 +1867,6 @@ static int
> smu_force_smuclk_levels(struct smu_context *smu,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2003,14 +1925,10 @@ static int smu_set_mp1_state(void *handle,
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs &&
> smu->ppt_funcs->set_mp1_state)
> ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2026,14 +1944,10 @@ static int smu_set_df_cstate(void *handle,
> if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_df_cstate(smu, state);
> if (ret)
> dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2047,38 +1961,25 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
> if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
> if (ret)
> dev_err(smu->adev->dev, "[AllowXgmiPowerDown]
> failed!\n");
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> int smu_write_watermarks_table(struct smu_context *smu) {
> - int ret = 0;
> -
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, NULL);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, NULL);
> }
>
> static int smu_set_watermarks_for_clock_ranges(void *handle,
> struct pp_smu_wm_range_sets
> *clock_ranges) {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -2086,13 +1987,7 @@ static int
> smu_set_watermarks_for_clock_ranges(void *handle,
> if (smu->disable_watermark)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_watermarks_table(smu, clock_ranges);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_watermarks_table(smu, clock_ranges);
> }
>
> int smu_set_ac_dc(struct smu_context *smu) @@ -2106,14 +2001,12 @@
> int smu_set_ac_dc(struct smu_context *smu)
> if (smu->dc_controlled_by_gpio)
> return 0;
>
> - mutex_lock(&smu->mutex);
> ret = smu_set_power_source(smu,
> smu->adev->pm.ac_power ?
> SMU_POWER_SOURCE_AC :
> SMU_POWER_SOURCE_DC);
> if (ret)
> dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
> smu->adev->pm.ac_power ? "AC" : "DC");
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -2200,13 +2093,9 @@ static int smu_set_gfx_cgpg(struct smu_context
> *smu, bool enabled) {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_gfx_cgpg)
> ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2224,8 +2113,6 @@ static int smu_set_fan_speed_rpm(void *handle,
> uint32_t speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags &
> SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |=
> SMU_CUSTOM_FAN_SPEED_RPM; @@ -2236,8 +2123,6 @@ static int
> smu_set_fan_speed_rpm(void *handle, uint32_t speed)
> smu->user_dpm_profile.fan_speed_pwm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2293,8 +2178,6 @@ int smu_get_power_limit(void *handle,
> break;
> }
>
> - mutex_lock(&smu->mutex);
> -
> if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
> if (smu->ppt_funcs->get_ppt_limit)
> ret = smu->ppt_funcs->get_ppt_limit(smu, limit,
> limit_type, limit_level); @@ -2328,8 +2211,6 @@ int
> smu_get_power_limit(void *handle,
> }
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2342,21 +2223,16 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> limit &= (1<<24)-1;
> if (limit_type != SMU_DEFAULT_PPT_LIMIT)
> - if (smu->ppt_funcs->set_power_limit) {
> - ret = smu->ppt_funcs->set_power_limit(smu,
> limit_type, limit);
> - goto out;
> - }
> + if (smu->ppt_funcs->set_power_limit)
> + return smu->ppt_funcs->set_power_limit(smu,
> limit_type, limit);
>
> if (limit > smu->max_power_limit) {
> dev_err(smu->adev->dev,
> "New power limit (%d) is over the max
> allowed %d\n",
> limit, smu->max_power_limit);
> - ret = -EINVAL;
> - goto out;
> + return -EINVAL;
> }
>
> if (!limit)
> @@ -2368,9 +2244,6 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
> smu->user_dpm_profile.power_limit = limit;
> }
>
> -out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2381,13 +2254,9 @@ static int smu_print_smuclk_levels(struct
> smu_context *smu, enum smu_clk_type cl
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->print_clk_levels)
> ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2444,14 +2313,10 @@ static int smu_od_edit_dpm_table(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->od_edit_dpm_table) {
> ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input,
> size);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2475,8 +2340,6 @@ static int smu_read_sensor(void *handle,
> size_val = *size_arg;
> size = &size_val;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->read_sensor)
> if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
> goto unlock;
> @@ -2517,8 +2380,6 @@ static int smu_read_sensor(void *handle,
> }
>
> unlock:
> - mutex_unlock(&smu->mutex);
> -
> // assign uint32_t to int
> *size_arg = size_val;
>
> @@ -2528,7 +2389,6 @@ static int smu_read_sensor(void *handle, static int
> smu_get_power_profile_mode(void *handle, char *buf) {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->get_power_profile_mode)
> @@ -2536,13 +2396,7 @@ static int smu_get_power_profile_mode(void
> *handle, char *buf)
> if (!buf)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu->ppt_funcs->get_power_profile_mode(smu, buf);
> }
>
> static int smu_set_power_profile_mode(void *handle, @@ -2550,19
> +2404,12 @@ static int smu_set_power_profile_mode(void *handle,
> uint32_t param_size)
> {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->set_power_profile_mode)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - smu_bump_power_profile_mode(smu, param, param_size);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_bump_power_profile_mode(smu, param, param_size);
> }
>
>
> @@ -2579,12 +2426,8 @@ static int smu_get_fan_control_mode(void
> *handle, u32 *fan_mode)
> if (!fan_mode)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return 0;
> }
>
> @@ -2602,8 +2445,6 @@ static int smu_set_fan_control_mode(void *handle,
> u32 value)
> if (value == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
> if (ret)
> goto out;
> @@ -2620,8 +2461,6 @@ static int smu_set_fan_control_mode(void *handle,
> u32 value)
> }
>
> out:
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2639,12 +2478,8 @@ static int smu_get_fan_speed_pwm(void *handle,
> u32 *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2662,8 +2497,6 @@ static int smu_set_fan_speed_pwm(void *handle,
> u32 speed)
> if (speed == U32_MAX)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
> if (!ret && !(smu->user_dpm_profile.flags &
> SMU_DPM_USER_PROFILE_RESTORE)) {
> smu->user_dpm_profile.flags |=
> SMU_CUSTOM_FAN_SPEED_PWM; @@ -2674,8 +2507,6 @@ static int
> smu_set_fan_speed_pwm(void *handle, u32 speed)
> smu->user_dpm_profile.fan_speed_rpm = 0;
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2693,30 +2524,19 @@ static int smu_get_fan_speed_rpm(void *handle,
> uint32_t *speed)
> if (!speed)
> return -EINVAL;
>
> - mutex_lock(&smu->mutex);
> -
> ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - ret = smu_set_min_dcef_deep_sleep(smu, clk);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return smu_set_min_dcef_deep_sleep(smu, clk);
> }
>
> static int smu_get_clock_by_type_with_latency(void *handle, @@ -2730,8
> +2550,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_clock_by_type_with_latency) {
> switch (type) {
> case amd_pp_sys_clock:
> @@ -2748,15 +2566,12 @@ static int
> smu_get_clock_by_type_with_latency(void *handle,
> break;
> default:
> dev_err(smu->adev->dev, "Invalid clock type!\n");
> - mutex_unlock(&smu->mutex);
> return -EINVAL;
> }
>
> ret = smu->ppt_funcs-
> >get_clock_by_type_with_latency(smu, clk_type, clocks);
> }
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2769,13 +2584,9 @@ static int smu_display_clock_voltage_request(void
> *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_clock_voltage_request)
> ret = smu->ppt_funcs->display_clock_voltage_request(smu,
> clock_req);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2789,13 +2600,9 @@ static int
> smu_display_disable_memory_clock_switch(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->display_disable_memory_clock_switch)
> ret = smu->ppt_funcs-
> >display_disable_memory_clock_switch(smu,
> disable_memory_clock_switch);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2808,13 +2615,9 @@ static int smu_set_xgmi_pstate(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->set_xgmi_pstate)
> ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
>
> - mutex_unlock(&smu->mutex);
> -
> if(ret)
> dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
>
> @@ -2824,21 +2627,16 @@ static int smu_set_xgmi_pstate(void *handle,
> static int smu_get_baco_capability(void *handle, bool *cap) {
> struct smu_context *smu = handle;
> - int ret = 0;
>
> *cap = false;
>
> if (!smu->pm_enabled)
> return 0;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
> *cap = smu->ppt_funcs->baco_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> - return ret;
> + return 0;
> }
>
> static int smu_baco_set_state(void *handle, int state) @@ -2850,20
> +2648,11 @@ static int smu_baco_set_state(void *handle, int state)
> return -EOPNOTSUPP;
>
> if (state == 0) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_exit)
> ret = smu->ppt_funcs->baco_exit(smu);
> -
> - mutex_unlock(&smu->mutex);
> } else if (state == 1) {
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->baco_enter)
> ret = smu->ppt_funcs->baco_enter(smu);
> -
> - mutex_unlock(&smu->mutex);
> -
> } else {
> return -EINVAL;
> }
> @@ -2882,13 +2671,9 @@ bool smu_mode1_reset_is_support(struct
> smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
> ret = smu->ppt_funcs->mode1_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2899,13 +2684,9 @@ bool smu_mode2_reset_is_support(struct
> smu_context *smu)
> if (!smu->pm_enabled)
> return false;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
> ret = smu->ppt_funcs->mode2_reset_is_support(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2916,13 +2697,9 @@ int smu_mode1_reset(struct smu_context *smu)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode1_reset)
> ret = smu->ppt_funcs->mode1_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2934,13 +2711,9 @@ static int smu_mode2_reset(void *handle)
> if (!smu->pm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->mode2_reset)
> ret = smu->ppt_funcs->mode2_reset(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> if (ret)
> dev_err(smu->adev->dev, "Mode2 reset failed!\n");
>
> @@ -2956,13 +2729,9 @@ static int
> smu_get_max_sustainable_clocks_by_dc(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
> ret = smu->ppt_funcs-
> >get_max_sustainable_clocks_by_dc(smu, max_clocks);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2976,13 +2745,9 @@ static int smu_get_uclk_dpm_states(void *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_uclk_dpm_states)
> ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
> clock_values_in_khz, num_states);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -2994,13 +2759,9 @@ static enum amd_pm_state_type
> smu_get_current_power_state(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_current_power_state)
> pm_state = smu->ppt_funcs-
> >get_current_power_state(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return pm_state;
> }
>
> @@ -3013,20 +2774,15 @@ static int smu_get_dpm_clock_table(void
> *handle,
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->get_dpm_clock_table)
> ret = smu->ppt_funcs->get_dpm_clock_table(smu,
> clock_table);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) {
> struct smu_context *smu = handle;
> - ssize_t size;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -3034,13 +2790,7 @@ static ssize_t smu_sys_get_gpu_metrics(void
> *handle, void **table)
> if (!smu->ppt_funcs->get_gpu_metrics)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> - size = smu->ppt_funcs->get_gpu_metrics(smu, table);
> -
> - mutex_unlock(&smu->mutex);
> -
> - return size;
> + return smu->ppt_funcs->get_gpu_metrics(smu, table);
> }
>
> static int smu_enable_mgpu_fan_boost(void *handle) @@ -3051,13 +2801,9
> @@ static int smu_enable_mgpu_fan_boost(void *handle)
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> -
> if (smu->ppt_funcs->enable_mgpu_fan_boost)
> ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
>
> - mutex_unlock(&smu->mutex);
> -
> return ret;
> }
>
> @@ -3067,10 +2813,8 @@ static int smu_gfx_state_change_set(void
> *handle,
> struct smu_context *smu = handle;
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->gfx_state_change_set)
> ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3079,10 +2823,8 @@ int smu_handle_passthrough_sbr(struct
> smu_context *smu, bool enable) {
> int ret = 0;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs->smu_handle_passthrough_sbr)
> ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu,
> enable);
> - mutex_unlock(&smu->mutex);
>
> return ret;
> }
> @@ -3091,11 +2833,9 @@ int smu_get_ecc_info(struct smu_context *smu,
> void *umc_ecc) {
> int ret = -EOPNOTSUPP;
>
> - mutex_lock(&smu->mutex);
> if (smu->ppt_funcs &&
> smu->ppt_funcs->get_ecc_info)
> ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
> - mutex_unlock(&smu->mutex);
>
> return ret;
>
> @@ -3112,12 +2852,10 @@ static int smu_get_prv_buffer_details(void
> *handle, void **addr, size_t *size)
>
> *addr = NULL;
> *size = 0;
> - mutex_lock(&smu->mutex);
> if (memory_pool->bo) {
> *addr = memory_pool->cpu_addr;
> *size = memory_pool->size;
> }
> - mutex_unlock(&smu->mutex);
>
> return 0;
> }
> @@ -3181,11 +2919,8 @@ int smu_wait_for_event(struct smu_context
> *smu, enum smu_event_type event, {
> int ret = -EINVAL;
>
> - if (smu->ppt_funcs->wait_for_event) {
> - mutex_lock(&smu->mutex);
> + if (smu->ppt_funcs->wait_for_event)
> ret = smu->ppt_funcs->wait_for_event(smu, event,
> event_arg);
> - mutex_unlock(&smu->mutex);
> - }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 3fdab6a44901..00760f3c6da5 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -488,7 +488,6 @@ struct smu_context
> const struct cmn2asic_mapping *table_map;
> const struct cmn2asic_mapping *pwr_src_map;
> const struct cmn2asic_mapping *workload_map;
> - struct mutex mutex;
> struct mutex sensor_lock;
> struct mutex metrics_lock;
> struct mutex message_lock;
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index d3963bfe5c89..addb0472d040 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -2118,9 +2118,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 37e11716e919..fe17b3c1ece7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -2826,9 +2826,7 @@ static int navi10_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 9766870987db..93caaf45a2db 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -3483,9 +3483,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index ac8ba5e0e697..2546f79c8511 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -1521,9 +1521,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter
> *i2c_adap,
> }
> }
> }
> - mutex_lock(&smu->mutex);
> r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0,
> req, true);
> - mutex_unlock(&smu->mutex);
> if (r)
> goto fail;
>
> --
> 2.29.0
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2022-01-21 7:09 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-01-17 5:41 [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Evan Quan
2022-01-17 5:41 ` [PATCH V2 2/7] drm/amd/pm: drop unneeded vcn/jpeg_gate_lock Evan Quan
2022-01-17 5:41 ` [PATCH V2 3/7] drm/amd/pm: drop unneeded smu->metrics_lock Evan Quan
2022-01-17 5:41 ` [PATCH V2 4/7] drm/amd/pm: drop unneeded smu->sensor_lock Evan Quan
2022-01-17 5:41 ` [PATCH V2 5/7] drm/amd/pm: drop unneeded smu_baco->mutex Evan Quan
2022-01-17 5:41 ` [PATCH V2 6/7] drm/amd/pm: drop unneeded feature->mutex Evan Quan
2022-01-17 5:41 ` [PATCH V2 7/7] drm/amd/pm: drop unneeded hwmgr->smu_lock Evan Quan
2022-01-20 11:51 ` Quan, Evan
2022-01-20 13:41 ` Chen, Guchun
2022-01-20 13:37 ` [PATCH V2 1/7] drm/amd/pm: drop unneeded lock protection smu->mutex Chen, Guchun
2022-01-21 7:09 ` Quan, Evan
2022-01-20 15:23 ` Lazar, Lijo
2022-01-21 7:08 ` Quan, Evan
2022-01-20 15:59 ` Lazar, Lijo
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.