* [PATCH 01/12] drm/amd/pm: drop unused structure members
@ 2022-02-11  7:51 Evan Quan
  2022-02-11  7:51 ` [PATCH 02/12] drm/amd/pm: drop unused interfaces Evan Quan
                   ` (11 more replies)
  0 siblings, 12 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:51 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Drop those members which never get used.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Iec70ad1dfe2059be26843f378588e6c894e9cae8
---
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index fbef3ab8d487..fb32846a2d0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -373,8 +373,6 @@ struct smu_dpm_context {
 };
 
 struct smu_power_gate {
-	bool uvd_gated;
-	bool vce_gated;
 	atomic_t vcn_gated;
 	atomic_t jpeg_gated;
 };
-- 
2.29.0



* [PATCH 02/12] drm/amd/pm: drop unused interfaces
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
@ 2022-02-11  7:51 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 03/12] drm/amd/pm: drop unneeded !smu->pm_enabled check Evan Quan
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:51 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Drop those interfaces which never get used.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Ia22d395145a1003faca5ac792dca6a30ef2cae54
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 13 ---------
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  5 ----
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h  |  4 ---
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    |  6 -----
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c    | 27 -------------------
 5 files changed, 55 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 6535cf336fa5..1c3a5ccd100c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2686,19 +2686,6 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
 	return ret;
 }
 
-bool smu_mode2_reset_is_support(struct smu_context *smu)
-{
-	bool ret = false;
-
-	if (!smu->pm_enabled)
-		return false;
-
-	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
-		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
-
-	return ret;
-}
-
 int smu_mode1_reset(struct smu_context *smu)
 {
 	int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index fb32846a2d0e..39d169440d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1143,10 +1143,6 @@ struct pptable_funcs {
 	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
 	 */
 	bool (*mode1_reset_is_support)(struct smu_context *smu);
-	/**
-	 * @mode2_reset_is_support: Check if GPU supports mode2 reset.
-	 */
-	bool (*mode2_reset_is_support)(struct smu_context *smu);
 
 	/**
 	 * @mode1_reset: Perform mode1 reset.
@@ -1397,7 +1393,6 @@ int smu_get_power_limit(void *handle,
 			enum pp_power_type pp_power_type);
 
 bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
 
 extern const struct amd_ip_funcs smu_ip_funcs;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 44af23ae059e..10f41cab796e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -156,12 +156,8 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
 int smu_v13_0_system_features_control(struct smu_context *smu,
 				      bool en);
 
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
 int smu_v13_0_set_allowed_mask(struct smu_context *smu);
 
-int smu_v13_0_notify_display_change(struct smu_context *smu);
-
 int smu_v13_0_get_current_power_limit(struct smu_context *smu,
 				      uint32_t *power_limit);
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 890acc4e2cb8..d7e619728e60 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1967,11 +1967,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 	return true;
 }
 
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
-	return true;
-}
-
 static int aldebaran_set_mp1_state(struct smu_context *smu,
 				   enum pp_mp1_state mp1_state)
 {
@@ -2052,7 +2047,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
-	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
 	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
 	.mode1_reset = aldebaran_mode1_reset,
 	.set_mp1_state = aldebaran_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f0ab1dc3ca59..b4fd148754ac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -703,19 +703,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
 	return ret;
 }
 
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return ret;
-
-	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
-	return ret;
-}
-
-
 int smu_v13_0_set_allowed_mask(struct smu_context *smu)
 {
 	struct smu_feature *feature = &smu->smu_feature;
@@ -768,20 +755,6 @@ int smu_v13_0_system_features_control(struct smu_context *smu,
 					  SMU_MSG_DisableAllSmuFeatures), NULL);
 }
 
-int smu_v13_0_notify_display_change(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return ret;
-
-	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
-	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
-
-	return ret;
-}
-
 	static int
 smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
 				    enum smu_clk_type clock_select)
-- 
2.29.0



* [PATCH 03/12] drm/amd/pm: drop unneeded !smu->pm_enabled check
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
  2022-02-11  7:51 ` [PATCH 02/12] drm/amd/pm: drop unused interfaces Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 04/12] drm/amd/pm: use adev->pm.dpm_enabled for dpm enablement check Evan Quan
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

smu->pm_enabled is a prerequisite for adev->pm.dpm_enabled. So, with
adev->pm.dpm_enabled set, it is guaranteed that smu->pm_enabled is also
set. Thus the extra check for "!smu->pm_enabled" is unnecessary.

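For illustration, a minimal user-space sketch of the invariant relied on
here; the struct names below are simplified stand-ins for the real kernel
types, not the actual definitions:

#include <stdbool.h>
#include <stdio.h>

struct dev_pm  { bool dpm_enabled; };
struct device  { struct dev_pm pm; };
struct smu_ctx { bool pm_enabled; struct device *adev; };

/*
 * dpm_enabled is only ever set on a path that already required
 * pm_enabled, so dpm_enabled implies pm_enabled and the extra
 * "!smu->pm_enabled" test adds nothing.
 */
static bool dpm_ready(struct smu_ctx *smu)
{
	return smu->adev->pm.dpm_enabled;
}

int main(void)
{
	struct device adev = { .pm = { .dpm_enabled = true } };
	struct smu_ctx smu = { .pm_enabled = true, .adev = &adev };

	printf("dpm ready: %d\n", dpm_ready(&smu));
	return 0;
}
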
Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I6ff67137d447e6a3d8cc627b397428fed22753f3
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 84 +++++++++++------------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 1c3a5ccd100c..96a3388c2cb7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,7 +68,7 @@ static int smu_sys_get_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	return smu_get_pp_feature_mask(smu, buf);
@@ -79,7 +79,7 @@ static int smu_sys_set_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	return smu_set_pp_feature_mask(smu, new_mask);
@@ -219,7 +219,7 @@ static int smu_dpm_set_power_gate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
+	if (!smu->adev->pm.dpm_enabled) {
 		dev_WARN(smu->adev->dev,
 			 "SMU uninitialized but power %s requested for %u!\n",
 			 gate ? "gate" : "ungate", block_type);
@@ -315,7 +315,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 	if (!smu->adev->in_suspend)
 		return;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return;
 
 	/* Enable restore flag */
@@ -428,7 +428,7 @@ static int smu_sys_get_pp_table(void *handle,
 	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
@@ -451,7 +451,7 @@ static int smu_sys_set_pp_table(void *handle,
 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (header->usStructureSize != size) {
@@ -1564,7 +1564,7 @@ static int smu_display_configuration_change(void *handle,
 	int index = 0;
 	int num_of_active_display = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!display_config)
@@ -1704,7 +1704,7 @@ static int smu_handle_task(struct smu_context *smu,
 {
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	switch (task_id) {
@@ -1745,7 +1745,7 @@ static int smu_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1775,7 +1775,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
@@ -1791,7 +1791,7 @@ static int smu_force_performance_level(void *handle,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
@@ -1817,7 +1817,7 @@ static int smu_set_display_count(void *handle, uint32_t count)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	return smu_init_display_count(smu, count);
@@ -1830,7 +1830,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1917,7 +1917,7 @@ static int smu_set_df_cstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
@@ -1934,7 +1934,7 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
@@ -1949,7 +1949,7 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	return smu_set_watermarks_table(smu, NULL);
@@ -1960,7 +1960,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->disable_watermark)
@@ -1973,7 +1973,7 @@ int smu_set_ac_dc(struct smu_context *smu)
 {
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	/* controlled by firmware */
@@ -2083,7 +2083,7 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->set_fan_speed_rpm)
@@ -2126,7 +2126,7 @@ int smu_get_power_limit(void *handle,
 	uint32_t limit_type;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	switch(pp_power_type) {
@@ -2199,7 +2199,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
 	uint32_t limit_type = limit >> 24;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	limit &= (1<<24)-1;
@@ -2230,7 +2230,7 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
 {
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->print_clk_levels)
@@ -2319,7 +2319,7 @@ static int smu_od_edit_dpm_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->od_edit_dpm_table) {
@@ -2340,7 +2340,7 @@ static int smu_read_sensor(void *handle,
 	int ret = 0;
 	uint32_t *size, size_val;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!data || !size_arg)
@@ -2399,7 +2399,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+	if (!smu->adev->pm.dpm_enabled ||
 	    !smu->ppt_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
@@ -2414,7 +2414,7 @@ static int smu_set_power_profile_mode(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+	if (!smu->adev->pm.dpm_enabled ||
 	    !smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
@@ -2426,7 +2426,7 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->get_fan_control_mode)
@@ -2445,7 +2445,7 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->set_fan_control_mode)
@@ -2478,7 +2478,7 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->get_fan_speed_pwm)
@@ -2497,7 +2497,7 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->set_fan_speed_pwm)
@@ -2524,7 +2524,7 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->get_fan_speed_rpm)
@@ -2542,7 +2542,7 @@ static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	return smu_set_min_dcef_deep_sleep(smu, clk);
@@ -2556,7 +2556,7 @@ static int smu_get_clock_by_type_with_latency(void *handle,
 	enum smu_clk_type clk_type;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
@@ -2590,7 +2590,7 @@ static int smu_display_clock_voltage_request(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2606,7 +2606,7 @@ static int smu_display_disable_memory_clock_switch(void *handle,
 	struct smu_context *smu = handle;
 	int ret = -EINVAL;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2621,7 +2621,7 @@ static int smu_set_xgmi_pstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2722,7 +2722,7 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2738,7 +2738,7 @@ static int smu_get_uclk_dpm_states(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2752,7 +2752,7 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 	struct smu_context *smu = handle;
 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->get_current_power_state)
@@ -2767,7 +2767,7 @@ static int smu_get_dpm_clock_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->get_dpm_clock_table)
@@ -2780,7 +2780,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!smu->ppt_funcs->get_gpu_metrics)
@@ -2794,7 +2794,7 @@ static int smu_enable_mgpu_fan_boost(void *handle)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
-- 
2.29.0



* [PATCH 04/12] drm/amd/pm: use adev->pm.dpm_enabled for dpm enablement check
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
  2022-02-11  7:51 ` [PATCH 02/12] drm/amd/pm: drop unused interfaces Evan Quan
  2022-02-11  7:52 ` [PATCH 03/12] drm/amd/pm: drop unneeded !smu->pm_enabled check Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c Evan Quan
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

adev->pm.dpm_enabled, rather than hwmgr->pm_en, better reflects whether
the dpm features are actually enabled.
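
For illustration, a minimal user-space sketch of the check pattern used
throughout this patch (casting the void * back-pointer and testing
dpm_enabled); the types below are simplified stand-ins, not the real
kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct amdgpu_pm     { bool dpm_enabled; };
struct amdgpu_device { struct amdgpu_pm pm; };
struct pp_hwmgr      { void *adev; bool pm_en; };

static int pp_dpm_example(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	/* hwmgr->adev is stored as void *, hence the explicit cast */
	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
		return -1;	/* -EINVAL in the real code */

	return 0;
}

int main(void)
{
	struct amdgpu_device adev = { .pm = { .dpm_enabled = true } };
	struct pp_hwmgr hwmgr = { .adev = &adev, .pm_en = true };

	printf("ret = %d\n", pp_dpm_example(&hwmgr));
	return 0;
}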

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I6896dcee19bb473d26115cdcb12b6efd554b30f9
---
 drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c    |  39 +++---
 drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c    |  39 +++---
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 116 +++++++++---------
 .../gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c    |   6 +
 4 files changed, 104 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8b23cc9f098a..19e75a3c8bb1 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3079,8 +3079,9 @@ static int kv_dpm_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled)
-		kv_dpm_disable(adev);
+	adev->pm.dpm_enabled = false;
+
+	kv_dpm_disable(adev);
 
 	return 0;
 }
@@ -3089,12 +3090,13 @@ static int kv_dpm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled) {
-		/* disable dpm */
-		kv_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
-	}
+	adev->pm.dpm_enabled = false;
+
+	/* disable dpm */
+	kv_dpm_disable(adev);
+	/* reset the power state */
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+
 	return 0;
 }
 
@@ -3103,17 +3105,16 @@ static int kv_dpm_resume(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled) {
-		/* asic init will reset to the boot state */
-		kv_dpm_setup_asic(adev);
-		ret = kv_dpm_enable(adev);
-		if (ret)
-			adev->pm.dpm_enabled = false;
-		else
-			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
-			amdgpu_legacy_dpm_compute_clocks(adev);
-	}
+	/* asic init will reset to the boot state */
+	kv_dpm_setup_asic(adev);
+	ret = kv_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+	if (adev->pm.dpm_enabled)
+		amdgpu_legacy_dpm_compute_clocks(adev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index caae54487f9c..c6a294af8de8 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7847,8 +7847,9 @@ static int si_dpm_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled)
-		si_dpm_disable(adev);
+	adev->pm.dpm_enabled = false;
+
+	si_dpm_disable(adev);
 
 	return 0;
 }
@@ -7857,12 +7858,13 @@ static int si_dpm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled) {
-		/* disable dpm */
-		si_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
-	}
+	adev->pm.dpm_enabled = false;
+
+	/* disable dpm */
+	si_dpm_disable(adev);
+	/* reset the power state */
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+
 	return 0;
 }
 
@@ -7871,17 +7873,16 @@ static int si_dpm_resume(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->pm.dpm_enabled) {
-		/* asic init will reset to the boot state */
-		si_dpm_setup_asic(adev);
-		ret = si_dpm_enable(adev);
-		if (ret)
-			adev->pm.dpm_enabled = false;
-		else
-			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
-			amdgpu_legacy_dpm_compute_clocks(adev);
-	}
+	/* asic init will reset to the boot state */
+	si_dpm_setup_asic(adev);
+	ret = si_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+	if (adev->pm.dpm_enabled)
+		amdgpu_legacy_dpm_compute_clocks(adev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a2da46bf3985..991ac4adb263 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
@@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (level == hwmgr->dpm_level)
@@ -353,7 +353,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	return hwmgr->dpm_level;
@@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
@@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
@@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	return hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -432,7 +432,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 	struct pp_power_state *state;
 	enum amd_pm_state_type pm_type;
 
-	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->current_ps)
 		return -EINVAL;
 
 	state = hwmgr->current_ps;
@@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
@@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
@@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
@@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
@@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
@@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
@@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
 
 	memset(data, 0, sizeof(*data));
 
-	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->ps)
 		return -EINVAL;
 
 	data->nums = hwmgr->num_ps;
@@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->soft_pp_table)
 		return -EINVAL;
 
 	*table = (char *)hwmgr->soft_pp_table;
@@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = -ENOMEM;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (!hwmgr->hardcode_pp_table) {
@@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
@@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
@@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
@@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
@@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
@@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
@@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en || !value)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !value)
 		return -EINVAL;
 
 	switch (idx) {
@@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return NULL;
 
 	if (idx < hwmgr->num_vce_state_tables)
@@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
@@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	struct pp_hwmgr *hwmgr = handle;
 	uint32_t max_power_limit;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
@@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en ||!limit)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!limit)
 		return -EINVAL;
 
 	if (power_type != PP_PWR_TYPE_SUSTAINED)
@@ -965,7 +965,7 @@ static int pp_display_configuration_change(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	phm_store_dal_configuration_data(hwmgr, display_config);
@@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en ||!output)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!output)
 		return -EINVAL;
 
 	return phm_get_dal_power_level(hwmgr, output);
@@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (clocks == NULL)
@@ -1050,7 +1050,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en ||!clocks)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
@@ -1062,7 +1062,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en ||!clocks)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
@@ -1073,7 +1073,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !clock_ranges)
 		return -EINVAL;
 
 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
@@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en ||!clock)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clock)
 		return -EINVAL;
 
 	return phm_display_clock_voltage_request(hwmgr, clock);
@@ -1097,7 +1097,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en ||!clocks)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
 		return -EINVAL;
 
 	clocks->level = PP_DAL_POWERLEVEL_7;
@@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
@@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return 0;
 
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
@@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
@@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
@@ -1228,7 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en ||
+	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
 	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
 		return 0;
 
@@ -1241,7 +1241,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
@@ -1258,7 +1258,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
@@ -1275,7 +1275,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
@@ -1292,7 +1292,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	return phm_set_active_display_count(hwmgr, count);
@@ -1350,7 +1350,7 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en || !buf)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
@@ -1365,7 +1365,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
@@ -1395,7 +1395,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
@@ -1413,7 +1413,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_df_cstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
@@ -1428,7 +1428,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_xgmi_pstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
@@ -1443,7 +1443,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
+	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
@@ -1453,7 +1453,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index f2cef0930aa9..4fd61d7f6c70 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -281,6 +281,8 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
+
 	phm_stop_thermal_controller(hwmgr);
 	psm_set_boot_states(hwmgr);
 	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
@@ -301,6 +303,8 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
+
 	phm_disable_smc_firmware_ctf(hwmgr);
 	ret = psm_set_boot_states(hwmgr);
 	if (ret)
@@ -336,6 +340,8 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
 		return ret;
 
 	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+	if (!ret)
+		((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
 
 	return ret;
 }
-- 
2.29.0



* [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (2 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 04/12] drm/amd/pm: use adev->pm.dpm_enabled for dpm enablement check Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  8:06   ` Chen, Guchun
  2022-02-11 13:39   ` Lazar, Lijo
  2022-02-11  7:52 ` [PATCH 06/12] drm/amd/pm: correct the checks for sriov(pp_one_vf) Evan Quan
                   ` (7 subsequent siblings)
  11 siblings, 2 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Instead of checking this in every instance (framework), it is more proper
to move the check to amdgpu_dpm.c. That also keeps the code clean and tidy.
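
For illustration, a toy sketch of the centralized check this patch moves
into amdgpu_dpm.c: the wrapper tests adev->pm.dpm_enabled once before
dispatching to the backend callback, so the individual backends no longer
need their own guards. The types below are simplified stand-ins, not the
real kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct amd_pm_funcs  { int (*get_sclk)(void *handle, bool low); };
struct amdgpu_device {
	struct { bool dpm_enabled; } pm;
	struct { const struct amd_pm_funcs *pp_funcs; void *pp_handle; } powerplay;
};

/* enablement is checked once here, on behalf of every backend */
static int amdgpu_dpm_get_sclk_sketch(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!adev->pm.dpm_enabled)
		return 0;

	if (!pp_funcs || !pp_funcs->get_sclk)
		return 0;

	return pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
}

static int backend_get_sclk(void *handle, bool low)
{
	(void)handle;
	return low ? 300 : 1200;	/* made-up clock values */
}

int main(void)
{
	static const struct amd_pm_funcs funcs = { .get_sclk = backend_get_sclk };
	struct amdgpu_device adev = {
		.pm = { .dpm_enabled = true },
		.powerplay = { .pp_funcs = &funcs, .pp_handle = NULL },
	};

	printf("sclk = %d\n", amdgpu_dpm_get_sclk_sketch(&adev, false));
	return 0;
}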

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277 ++++++++++++++++--
 drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
 .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
 9 files changed, 352 insertions(+), 240 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 2c929fa40379..fff0e6a3882e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_device *adev = ctx->adev;
 	enum amd_dpm_forced_level current_level;
+	int ret = 0;
 
 	if (!ctx)
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
+	if (ret)
+		return ret;
 
 	switch (current_level) {
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9f985bd463be..56144f25b720 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		unsigned i;
 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
 		struct amd_vce_state *vce_state;
+		int ret = 0;
 
 		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
-			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
-			if (vce_state) {
-				vce_clk_table.entries[i].sclk = vce_state->sclk;
-				vce_clk_table.entries[i].mclk = vce_state->mclk;
-				vce_clk_table.entries[i].eclk = vce_state->evclk;
-				vce_clk_table.num_valid_entries++;
-			}
+			ret = amdgpu_dpm_get_vce_clock_state(adev, i, vce_state);
+			if (ret)
+				return ret;
+
+			vce_clk_table.entries[i].sclk = vce_state->sclk;
+			vce_clk_table.entries[i].mclk = vce_state->mclk;
+			vce_clk_table.entries[i].eclk = vce_state->evclk;
+			vce_clk_table.num_valid_entries++;
 		}
 
 		return copy_to_user(out, &vce_clk_table,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 1d63f1e8884c..b46ae0063047 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!pp_funcs->get_sclk)
 		return 0;
 
@@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!pp_funcs->get_mclk)
 		return 0;
 
@@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
 
+	if (!adev->pm.dpm_enabled) {
+		dev_WARN(adev->dev,
+			 "SMU uninitialized but power %s requested for %u!\n",
+			 gate ? "gate" : "ungate", block_type);
+		return -EOPNOTSUPP;
+	}
+
 	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
 		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
 				block_type, gate ? "gate" : "ungate");
@@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
@@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
@@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_df_cstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
@@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		ret = smu_allow_xgmi_power_down(smu, en);
@@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 			adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
@@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 			adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
@@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 			adev->powerplay.pp_funcs;
 	int ret = -EOPNOTSUPP;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
@@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = -EINVAL;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!data || !size)
 		return -EINVAL;
 
@@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
@@ -500,6 +540,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&adev->pm.mutex);
 	ret = smu_send_hbm_bad_pages_num(smu, size);
 	mutex_unlock(&adev->pm.mutex);
@@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 {
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (type != PP_SCLK)
 		return -EINVAL;
 
@@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (type != PP_SCLK)
 		return -EINVAL;
 
@@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 
 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 {
-	struct smu_context *smu = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return 0;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = smu_write_watermarks_table(smu);
+	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
+							NULL);
 	mutex_unlock(&adev->pm.mutex);
 
 	return ret;
@@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
@@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
@@ -605,6 +664,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!is_support_sw_smu(adev))
 		return 0;
 
@@ -619,6 +681,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 				 enum gfx_change_state state)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 	if (adev->powerplay.pp_funcs &&
 	    adev->powerplay.pp_funcs->gfx_state_change_set)
@@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
 	return smu_get_ecc_info(smu, umc_ecc);
 }
 
-struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
-						     uint32_t idx)
+int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+				   uint32_t idx,
+				   struct amd_vce_state *vstate)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	struct amd_vce_state *vstate = NULL;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_vce_clock_state)
-		return NULL;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
 					       idx);
 	mutex_unlock(&adev->pm.mutex);
 
-	return vstate;
+	return 0;
 }
 
 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
@@ -660,6 +731,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 
 	if (!pp_funcs->get_current_power_state) {
@@ -679,6 +753,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 				enum amd_pm_state_type state)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 	adev->pm.dpm.user_state = state;
 	mutex_unlock(&adev->pm.mutex);
@@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 		amdgpu_dpm_compute_clocks(adev);
 }
 
-enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
+int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
+				     enum amd_dpm_forced_level *level)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	enum amd_dpm_forced_level level;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->get_performance_level)
-		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
+		*level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
 	else
-		level = adev->pm.dpm.forced_level;
+		*level = adev->pm.dpm.forced_level;
 	mutex_unlock(&adev->pm.mutex);
 
-	return level;
+	return 0;
 }
 
 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
@@ -717,13 +797,16 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->force_performance_level)
 		return 0;
 
 	if (adev->pm.dpm.thermal_active)
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	amdgpu_dpm_get_performance_level(adev, &current_level);
 	if (current_level == level)
 		return 0;
 
@@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_pp_num_states)
 		return -EOPNOTSUPP;
 
@@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->dispatch_tasks)
 		return -EOPNOTSUPP;
 
@@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_pp_table)
 		return 0;
 
@@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fine_grain_clk_vol)
 		return 0;
 
@@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->odn_edit_dpm_table)
 		return 0;
 
@@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->print_clock_levels)
 		return 0;
 
@@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_ppfeature_status)
 		return 0;
 
@@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_ppfeature_status)
 		return 0;
 
@@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->force_clock_level)
 		return 0;
 
@@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 	return ret;
 }
 
-int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
+			   uint32_t *value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	int ret = 0;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_sclk_od)
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
+	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
 	mutex_unlock(&adev->pm.mutex);
 
-	return ret;
+	return 0;
 }
 
 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev))
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->set_sclk_od)
@@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
 	return 0;
 }
 
-int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
+			   uint32_t *value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	int ret = 0;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_mclk_od)
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
+	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
 	mutex_unlock(&adev->pm.mutex);
 
-	return ret;
+	return 0;
 }
 
 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev))
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->set_mclk_od)
@@ -1043,6 +1165,9 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
 
@@ -1060,6 +1185,9 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_power_profile_mode)
 		return 0;
 
@@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_gpu_metrics)
 		return 0;
 
@@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_power_limit)
 		return -ENODATA;
 
@@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_power_limit)
 		return -EINVAL;
 
@@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
 {
 	bool cclk_dpm_supported = false;
 
+	if (!adev->pm.dpm_enabled)
+		return false;
+
 	if (!is_support_sw_smu(adev))
 		return false;
 
@@ -1247,6 +1405,9 @@ int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *ade
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->debugfs_print_current_performance_level)
 		return -EOPNOTSUPP;
 
@@ -1265,6 +1426,9 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_smu_prv_buf_details)
 		return -ENOSYS;
 
@@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return false;
+
 	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
 	    (is_support_sw_smu(adev) && smu->is_apu) ||
 		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
@@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_pp_table)
 		return -EOPNOTSUPP;
 
@@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return INT_MAX;
+
 	if (!is_support_sw_smu(adev))
 		return INT_MAX;
 
@@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
 
 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!is_support_sw_smu(adev))
 		return;
 
@@ -1333,6 +1509,9 @@ int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_configuration_change)
 		return 0;
 
@@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type)
 		return 0;
 
@@ -1369,6 +1551,9 @@ int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_display_mode_validation_clocks)
 		return 0;
 
@@ -1387,6 +1572,9 @@ int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type_with_latency)
 		return 0;
 
@@ -1406,6 +1594,9 @@ int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type_with_voltage)
 		return 0;
 
@@ -1424,6 +1615,9 @@ int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_watermarks_for_clocks_ranges)
 		return -EOPNOTSUPP;
 
@@ -1441,6 +1635,9 @@ int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_clock_voltage_request)
 		return -EOPNOTSUPP;
 
@@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_current_clocks)
 		return -EOPNOTSUPP;
 
@@ -1473,6 +1673,9 @@ void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->notify_smu_enable_pwe)
 		return;
 
@@ -1487,6 +1690,9 @@ int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_active_display_count)
 		return -EOPNOTSUPP;
 
@@ -1504,6 +1710,9 @@ int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_min_deep_sleep_dcefclk)
 		return -EOPNOTSUPP;
 
@@ -1520,6 +1729,9 @@ void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
 		return;
 
@@ -1534,6 +1746,9 @@ void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->set_hard_min_fclk_by_freq)
 		return;
 
@@ -1549,6 +1764,9 @@ int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_disable_memory_clock_switch)
 		return 0;
 
@@ -1566,6 +1784,9 @@ int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
 		return -EOPNOTSUPP;
 
@@ -1584,6 +1805,9 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_uclk_dpm_states)
 		return -EOPNOTSUPP;
 
@@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_dpm_clock_table)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b0243068212b..84aab3bb9bdc 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -273,11 +273,14 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 		return ret;
 	}
 
-	level = amdgpu_dpm_get_performance_level(adev);
+	ret = amdgpu_dpm_get_performance_level(adev, &level);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%s\n",
 			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 		return ret;
 	}
 
-	value = amdgpu_dpm_get_sclk_od(adev);
+	ret = amdgpu_dpm_get_sclk_od(adev, &value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%d\n", value);
 }
 
@@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		return ret;
 	}
 
-	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 
@@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 		return ret;
 	}
 
-	value = amdgpu_dpm_get_mclk_od(adev);
+	ret = amdgpu_dpm_get_mclk_od(adev, &value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%d\n", value);
 }
 
@@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		return ret;
 	}
 
-	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index ddfa55b59d02..49488aebd350 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 				 enum gfx_change_state state);
 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 			    void *umc_ecc);
-struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
-						     uint32_t idx);
+int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+				   uint32_t idx,
+				   struct amd_vce_state *vstate);
 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 				enum amd_pm_state_type state);
-enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
+int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
+				     enum amd_dpm_forced_level *level);
 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 				       enum amd_dpm_forced_level level);
 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
@@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 				 enum pp_clock_type type,
 				 uint32_t mask);
-int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
-int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
 				      char *buf);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 9613c6181c17..59550617cf54 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -959,10 +959,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	int ret;
 	bool equal = false;
 
-	/* if dpm init failed */
-	if (!adev->pm.dpm_enabled)
-		return 0;
-
 	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
 		/* add other state override checks here */
 		if ((!adev->pm.dpm.thermal_active) &&
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 991ac4adb263..bba923cfe08c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
@@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (level == hwmgr->dpm_level)
@@ -353,7 +353,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return hwmgr->dpm_level;
@@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
@@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
@@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -432,7 +432,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 	struct pp_power_state *state;
 	enum amd_pm_state_type pm_type;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->current_ps)
+	if (!hwmgr || !hwmgr->current_ps)
 		return -EINVAL;
 
 	state = hwmgr->current_ps;
@@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
@@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
@@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
@@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
@@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
@@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
@@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
 
 	memset(data, 0, sizeof(*data));
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->ps)
+	if (!hwmgr || !hwmgr->ps)
 		return -EINVAL;
 
 	data->nums = hwmgr->num_ps;
@@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->soft_pp_table)
+	if (!hwmgr || !hwmgr->soft_pp_table)
 		return -EINVAL;
 
 	*table = (char *)hwmgr->soft_pp_table;
@@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = -ENOMEM;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (!hwmgr->hardcode_pp_table) {
@@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
@@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
@@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
@@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
@@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
@@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
@@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !value)
+	if (!hwmgr || !value)
 		return -EINVAL;
 
 	switch (idx) {
@@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return NULL;
 
 	if (idx < hwmgr->num_vce_state_tables)
@@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
+	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
+	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
@@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	struct pp_hwmgr *hwmgr = handle;
 	uint32_t max_power_limit;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
@@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!limit)
+	if (!hwmgr || !limit)
 		return -EINVAL;
 
 	if (power_type != PP_PWR_TYPE_SUSTAINED)
@@ -965,7 +965,7 @@ static int pp_display_configuration_change(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	phm_store_dal_configuration_data(hwmgr, display_config);
@@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!output)
+	if (!hwmgr || !output)
 		return -EINVAL;
 
 	return phm_get_dal_power_level(hwmgr, output);
@@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (clocks == NULL)
@@ -1050,7 +1050,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
@@ -1062,7 +1062,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
@@ -1073,7 +1073,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !clock_ranges)
+	if (!hwmgr || !clock_ranges)
 		return -EINVAL;
 
 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
@@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clock)
+	if (!hwmgr || !clock)
 		return -EINVAL;
 
 	return phm_display_clock_voltage_request(hwmgr, clock);
@@ -1097,7 +1097,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	clocks->level = PP_DAL_POWERLEVEL_7;
@@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
@@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
@@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
@@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
@@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
-	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
+	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
 		return 0;
 
 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
@@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
@@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
@@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
@@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return phm_set_active_display_count(hwmgr, count);
@@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !buf)
+	if (!hwmgr || !buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
@@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
@@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
@@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_df_cstate)
+	if (!hwmgr->hwmgr_func->set_df_cstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
@@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_xgmi_pstate)
+	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
@@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_gpu_metrics)
+	if (!hwmgr->hwmgr_func->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
@@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 96a3388c2cb7..97c57a6cf314 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_get_pp_feature_mask(smu, buf);
 }
 
@@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_set_pp_feature_mask(smu, new_mask);
 }
 
@@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled) {
-		dev_WARN(smu->adev->dev,
-			 "SMU uninitialized but power %s requested for %u!\n",
-			 gate ? "gate" : "ungate", block_type);
-		return -EOPNOTSUPP;
-	}
-
 	switch (block_type) {
 	/*
 	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
@@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 	if (!smu->adev->in_suspend)
 		return;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return;
-
 	/* Enable restore flag */
 	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
 
@@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
 	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
 		return -EINVAL;
 
@@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (header->usStructureSize != size) {
 		dev_err(smu->adev->dev, "pp table size not matched !\n");
 		return -EIO;
@@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void *handle,
 	int index = 0;
 	int num_of_active_display = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!display_config)
 		return -EINVAL;
 
@@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context *smu,
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	switch (task_id) {
 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
 		ret = smu_pre_display_config_changed(smu);
@@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
 		return -EINVAL;
 
@@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
 		return -EINVAL;
 
@@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void *handle,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
 		return -EINVAL;
 
@@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle, uint32_t count)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_init_display_count(smu, count);
 }
 
@@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
 		return -EINVAL;
@@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
 		return 0;
 
@@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
 		return 0;
 
@@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 	return ret;
 }
 
-int smu_write_watermarks_table(struct smu_context *smu)
-{
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	return smu_set_watermarks_table(smu, NULL);
-}
-
 static int smu_set_watermarks_for_clock_ranges(void *handle,
 					       struct pp_smu_wm_range_sets *clock_ranges)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->disable_watermark)
 		return 0;
 
@@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	/* controlled by firmware */
 	if (smu->dc_controlled_by_gpio)
 		return 0;
@@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
 	uint32_t limit_type;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	switch(pp_power_type) {
 	case PP_PWR_TYPE_SUSTAINED:
 		limit_type = SMU_DEFAULT_PPT_LIMIT;
@@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
 	uint32_t limit_type = limit >> 24;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	limit &= (1<<24)-1;
 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
 		if (smu->ppt_funcs->set_power_limit)
@@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->print_clk_levels)
 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
 
@@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->od_edit_dpm_table) {
 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
 	}
@@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
 	int ret = 0;
 	uint32_t *size, size_val;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!data || !size_arg)
 		return -EINVAL;
 
@@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled ||
-	    !smu->ppt_funcs->get_power_profile_mode)
+	if (!smu->ppt_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled ||
-	    !smu->ppt_funcs->set_power_profile_mode)
+	if (!smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	return smu_bump_power_profile_mode(smu, param, param_size);
@@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_set_min_dcef_deep_sleep(smu, clk);
 }
 
@@ -2556,9 +2455,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
 	enum smu_clk_type clk_type;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
 		switch (type) {
 		case amd_pp_sys_clock:
@@ -2590,9 +2486,6 @@ static int smu_display_clock_voltage_request(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->display_clock_voltage_request)
 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
 
@@ -2606,9 +2499,6 @@ static int smu_display_disable_memory_clock_switch(void *handle,
 	struct smu_context *smu = handle;
 	int ret = -EINVAL;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
 
@@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->set_xgmi_pstate)
 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
 
@@ -2722,9 +2609,6 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
 
@@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_uclk_dpm_states)
 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
 
@@ -2752,9 +2633,6 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 	struct smu_context *smu = handle;
 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_current_power_state)
 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
 
@@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_dpm_clock_table)
 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
 
@@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
@@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void *handle)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 39d169440d15..bced761f3f96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_write_watermarks_table(struct smu_context *smu);
 
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max);
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 06/12] drm/amd/pm: correct the checks for sriov(pp_one_vf)
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (3 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs Evan Quan
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

By setting pm_enabled to false for the non-pp_one_vf SR-IOV case during
early init, we can avoid repeating the check for (amdgpu_sriov_vf(adev) &&
!amdgpu_sriov_is_pp_one_vf(adev)) in every routine. A sketch of the
consolidated gating follows the diffstat below.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I3859529183cd26dce98c57dc87eab5273ecc949b
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)
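
For illustration only, a minimal sketch of the consolidated gating this
change relies on. The helper name smu_pm_control_granted is hypothetical;
amdgpu_dpm, amdgpu_sriov_vf() and amdgpu_sriov_is_pp_one_vf() are the
existing module parameter and SR-IOV helpers referenced in the hunk below:

static bool smu_pm_control_granted(struct amdgpu_device *adev)
{
	/* DPM support must be requested via the amdgpu_dpm module parameter */
	if (!amdgpu_dpm)
		return false;

	/* under SR-IOV, only pp_one_vf mode keeps PM control in the guest */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return false;

	return true;
}

With pm_enabled derived once like this at early init, smu_hw_init(),
smu_hw_fini(), smu_suspend() and smu_resume() only need the single
!smu->pm_enabled bail-out shown below.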

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 97c57a6cf314..8b8feaf7aa0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -543,7 +543,8 @@ static int smu_early_init(void *handle)
 		return -ENOMEM;
 
 	smu->adev = adev;
-	smu->pm_enabled = !!amdgpu_dpm;
+	smu->pm_enabled = amdgpu_dpm &&
+			  (!amdgpu_sriov_vf(adev) || amdgpu_sriov_is_pp_one_vf(adev));
 	smu->is_apu = false;
 	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
 	smu->smu_baco.platform_support = false;
@@ -1257,10 +1258,8 @@ static int smu_hw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
-		smu->pm_enabled = false;
+	if (!smu->pm_enabled)
 		return 0;
-	}
 
 	ret = smu_start_smc_engine(smu);
 	if (ret) {
@@ -1274,9 +1273,6 @@ static int smu_hw_init(void *handle)
 		smu_set_gfx_cgpg(smu, true);
 	}
 
-	if (!smu->pm_enabled)
-		return 0;
-
 	/* get boot_values from vbios to set revision, gfxclk, and etc. */
 	ret = smu_get_vbios_bootup_values(smu);
 	if (ret) {
@@ -1428,7 +1424,7 @@ static int smu_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
-	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+	if (!smu->pm_enabled)
 		return 0;
 
 	smu_dpm_set_vcn_enable(smu, false);
@@ -1437,9 +1433,6 @@ static int smu_hw_fini(void *handle)
 	adev->vcn.cur_state = AMD_PG_STATE_GATE;
 	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
 
-	if (!smu->pm_enabled)
-		return 0;
-
 	adev->pm.dpm_enabled = false;
 
 	return smu_smc_hw_cleanup(smu);
@@ -1479,9 +1472,6 @@ static int smu_suspend(void *handle)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
 	if (!smu->pm_enabled)
 		return 0;
 
@@ -1504,9 +1494,6 @@ static int smu_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
-	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
 	if (!smu->pm_enabled)
 		return 0;
 
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (4 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 06/12] drm/amd/pm: correct the checks for sriov(pp_one_vf) Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-14  4:04   ` Lazar, Lijo
  2022-02-11  7:52 ` [PATCH 08/12] drm/amd/pm: add proper check for amdgpu_dpm before granting pp_dpm_load_fw Evan Quan
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Those gpu reset APIs can be granted when:
  - the system is up and dpm features are enabled, or
  - the system is resuming and dpm features are not yet re-enabled.
    In that scenario, the PMFW is already alive and can support
    those gpu reset functionalities.
Accordingly, the checks are switched from dpm_enabled to a new
is_smc_alive callback; a sketch of such a PMFW liveness probe follows
the diffstat below.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I8c2f07138921eb53a2bd7fb94f9b3622af0eacf8
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 34 +++++++++++++++
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 42 +++++++++++++++----
 .../drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c   |  1 +
 .../drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c   | 17 ++++++++
 drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h  |  1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 32 +++++++-------
 7 files changed, 101 insertions(+), 27 deletions(-)
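
For illustration only, a minimal sketch of how an is_smc_alive callback
can probe PMFW liveness via the MP1 firmware-flags aperture declared
later in this patch. The function name is hypothetical and RREG32_PCIE
is assumed to be the usual amdgpu PCIe register read helper; the exact
aperture/flag layout differs per ASIC:

/* mirrors the MP1 aperture constants added in amd_powerplay.c below */
#define MP1_Public					0x03b00000
#define smnMP1_FIRMWARE_FLAGS				0x3010028
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L

static bool example_is_smc_alive(struct amdgpu_device *adev)
{
	uint32_t mp1_fw_flags;

	/* PMFW sets this flag once its interrupt handling is up and running */
	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	return !!(mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK);
}

A probe like this is what allows the reset paths to be granted during
resume, when the firmware is alive but adev->pm.dpm_enabled has not been
set again yet.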

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a4c267f15959..892648a4a353 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -409,6 +409,7 @@ struct amd_pm_funcs {
 				   struct dpm_clocks *clock_table);
 	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
 	void (*pm_compute_clocks)(void *handle);
+	bool (*is_smc_alive)(void *handle);
 };
 
 struct metrics_table_header {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index b46ae0063047..5f1d3342f87b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -120,12 +120,25 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	return ret;
 }
 
+static bool amdgpu_dpm_is_smc_alive(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+	if (!pp_funcs || !pp_funcs->is_smc_alive)
+		return false;
+
+	return pp_funcs->is_smc_alive(adev->powerplay.pp_handle);
+}
+
 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 		return -ENOENT;
 
@@ -145,6 +158,9 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 	void *pp_handle = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 		return -ENOENT;
 
@@ -164,6 +180,9 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 	int ret = 0;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_mp1_state) {
 		mutex_lock(&adev->pm.mutex);
 
@@ -184,6 +203,9 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 	bool baco_cap;
 	int ret = 0;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return false;
+
 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
 		return false;
 
@@ -203,6 +225,9 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 	void *pp_handle = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
 		return -ENOENT;
 
@@ -221,6 +246,9 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
 	void *pp_handle = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
 		return -ENOENT;
 
@@ -244,6 +272,9 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	bool support_mode1_reset = false;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return false;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		support_mode1_reset = smu_mode1_reset_is_support(smu);
@@ -258,6 +289,9 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = -EOPNOTSUPP;
 
+	if (!amdgpu_dpm_is_smc_alive(adev))
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		ret = smu_mode1_reset(smu);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index bba923cfe08c..4c709f7bcd51 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -844,9 +844,6 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en)
-		return 0;
-
 	if (hwmgr->hwmgr_func->set_mp1_state)
 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 
@@ -1305,8 +1302,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!(hwmgr->not_vf && amdgpu_dpm) ||
-		!hwmgr->hwmgr_func->get_asic_baco_capability)
+	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
 		return 0;
 
 	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
@@ -1321,7 +1317,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+	if (!hwmgr->hwmgr_func->get_asic_baco_state)
 		return 0;
 
 	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
@@ -1336,8 +1332,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!(hwmgr->not_vf && amdgpu_dpm) ||
-		!hwmgr->hwmgr_func->set_asic_baco_state)
+	if (!hwmgr->hwmgr_func->set_asic_baco_state)
 		return 0;
 
 	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
@@ -1379,7 +1374,7 @@ static int pp_asic_reset_mode_2(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
@@ -1517,6 +1512,34 @@ static void pp_pm_compute_clocks(void *handle)
 			      NULL);
 }
 
+/* MP Apertures */
+#define MP1_Public					0x03b00000
+#define smnMP1_FIRMWARE_FLAGS				0x3010028
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L
+
+static bool pp_is_smc_alive(void *handle)
+{
+	struct pp_hwmgr *hwmgr = handle;
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
+
+	/*
+	 * If some ASIC (e.g. smu7/smu8) needs special handling for
+	 * checking smc alive, it should have its own implementation
+	 * for ->is_smc_alive.
+	 */
+	if (hwmgr->hwmgr_func->is_smc_alive)
+		return hwmgr->hwmgr_func->is_smc_alive(hwmgr);
+
+	mp1_fw_flags = RREG32_PCIE(MP1_Public |
+				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+		return true;
+
+	return false;
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1582,4 +1605,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.gfx_state_change_set = pp_gfx_state_change_set,
 	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
 	.pm_compute_clocks = pp_pm_compute_clocks,
+	.is_smc_alive = pp_is_smc_alive,
 };
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index a1e11037831a..118039b96524 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5735,6 +5735,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.get_asic_baco_state = smu7_baco_get_state,
 	.set_asic_baco_state = smu7_baco_set_state,
 	.power_off_asic = smu7_power_off_asic,
+	.is_smc_alive = smu7_is_smc_ram_running,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b50fd4a4a3d1..fc4d58329f6d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -2015,6 +2015,22 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	}
 }
 
+#define ixMP1_FIRMWARE_FLAGS					0x3008210
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK		0x00000001L
+
+static bool smu8_is_smc_running(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
+
+	mp1_fw_flags = RREG32_SMC(ixMP1_FIRMWARE_FLAGS);
+
+	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+		return true;
+
+	return false;
+}
+
 static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
 	.backend_init = smu8_hwmgr_backend_init,
 	.backend_fini = smu8_hwmgr_backend_fini,
@@ -2047,6 +2063,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
 	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
 	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
 	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
+	.is_smc_alive = smu8_is_smc_running,
 };
 
 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 4f7f2f455301..790fc387752c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -364,6 +364,7 @@ struct pp_hwmgr_func {
 					bool disable);
 	ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
 	int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state);
+	bool (*is_smc_alive)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8b8feaf7aa0e..27a453fb4db7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1845,9 +1845,6 @@ static int smu_set_mp1_state(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs &&
 	    smu->ppt_funcs->set_mp1_state)
 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
@@ -2513,9 +2510,6 @@ static int smu_get_baco_capability(void *handle, bool *cap)
 
 	*cap = false;
 
-	if (!smu->pm_enabled)
-		return 0;
-
 	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
 		*cap = smu->ppt_funcs->baco_is_support(smu);
 
@@ -2527,9 +2521,6 @@ static int smu_baco_set_state(void *handle, int state)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
 	if (state == 0) {
 		if (smu->ppt_funcs->baco_exit)
 			ret = smu->ppt_funcs->baco_exit(smu);
@@ -2551,9 +2542,6 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
 {
 	bool ret = false;
 
-	if (!smu->pm_enabled)
-		return false;
-
 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
 
@@ -2564,9 +2552,6 @@ int smu_mode1_reset(struct smu_context *smu)
 {
 	int ret = 0;
 
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->mode1_reset)
 		ret = smu->ppt_funcs->mode1_reset(smu);
 
@@ -2578,9 +2563,6 @@ static int smu_mode2_reset(void *handle)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->mode2_reset)
 		ret = smu->ppt_funcs->mode2_reset(smu);
 
@@ -2712,6 +2694,19 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
 	return 0;
 }
 
+static bool smu_is_smc_alive(void *handle)
+{
+	struct smu_context *smu = handle;
+
+	if (!smu->ppt_funcs->check_fw_status)
+		return false;
+
+	if (!smu->ppt_funcs->check_fw_status(smu))
+		return true;
+
+	return false;
+}
+
 static const struct amd_pm_funcs swsmu_pm_funcs = {
 	/* export for sysfs */
 	.set_fan_control_mode    = smu_set_fan_control_mode,
@@ -2765,6 +2760,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
 	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
 	.get_dpm_clock_table              = smu_get_dpm_clock_table,
 	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
+	.is_smc_alive = smu_is_smc_alive,
 };
 
 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 08/12] drm/amd/pm: add proper check for amdgpu_dpm before granting pp_dpm_load_fw
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (5 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 09/12] drm/amd/pm: drop redundant !pp_funcs check Evan Quan
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Make sure the interface is granted only when amdgpu_dpm is enabled.
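
For reference, the enablement condition now computed at pp instance
creation time reduces to the following (a condensed sketch of the
assignment added below):

	/* dpm is not forced off via the amdgpu_dpm module parameter, and we
	 * run either on bare metal or in the SR-IOV one-VF configuration. */
	hwmgr->pm_en = amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf);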

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Ia1d1123470fab89b41b24ea80dcb319570aa7438
---
 drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 6 ++++++
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c   | 3 ---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 4c709f7bcd51..e95893556147 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -49,6 +49,9 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 
 	hwmgr->adev = adev;
 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
+	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf(adev);
+	hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
+			? true : false;
 	hwmgr->device = amdgpu_cgs_create_device(adev);
 	mutex_init(&hwmgr->msg_lock);
 	hwmgr->chip_family = adev->family;
@@ -275,6 +278,9 @@ static int pp_dpm_load_fw(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
 	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
 		return -EINVAL;
 
+	if (!hwmgr->pm_en)
+		return -EOPNOTSUPP;
+
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 4fd61d7f6c70..c0c2f36094fa 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -217,9 +217,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
-	hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
-			? true : false;
 	if (!hwmgr->pm_en)
 		return 0;
 
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 09/12] drm/amd/pm: drop redundant !pp_funcs check
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (6 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 08/12] drm/amd/pm: add proper check for amdgpu_dpm before granting pp_dpm_load_fw Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 10/12] drm/amd/pm: drop nonsense !smu->ppt_funcs check Evan Quan
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

It is already covered by the "!adev->pm.dpm_enabled" check: as long as
"adev->pm.dpm_enabled" is true, "pp_funcs != NULL" is guaranteed as well
(see the sketch below).

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: Iec801f18a0069ad5fd384c4133016977fb2b67e8
---
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5f1d3342f87b..f237dd3a3f66 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -104,7 +104,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	case AMD_IP_BLOCK_TYPE_JPEG:
 	case AMD_IP_BLOCK_TYPE_GMC:
 	case AMD_IP_BLOCK_TYPE_ACP:
-		if (pp_funcs && pp_funcs->set_powergating_by_smu)
+		if (pp_funcs->set_powergating_by_smu)
 			ret = (pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate));
 		break;
@@ -314,7 +314,7 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (pp_funcs && pp_funcs->switch_power_profile) {
+	if (pp_funcs->switch_power_profile) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->switch_power_profile(
 			adev->powerplay.pp_handle, type, en);
@@ -333,7 +333,7 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 	if (!adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
+	if (pp_funcs->set_xgmi_pstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
 								pstate);
@@ -353,7 +353,7 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 	if (!adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (pp_funcs && pp_funcs->set_df_cstate) {
+	if (pp_funcs->set_df_cstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 		mutex_unlock(&adev->pm.mutex);
@@ -389,7 +389,7 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 	if (!adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
+	if (pp_funcs->enable_mgpu_fan_boost) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 		mutex_unlock(&adev->pm.mutex);
@@ -409,7 +409,7 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 	if (!adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
+	if (pp_funcs->set_clockgating_by_smu) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
 						       msg_id);
@@ -430,7 +430,7 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 	if (!adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
+	if (pp_funcs->smu_i2c_bus_access) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
 						   acquire);
@@ -449,8 +449,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 		else
 			adev->pm.ac_power = false;
 
-		if (adev->powerplay.pp_funcs &&
-		    adev->powerplay.pp_funcs->enable_bapm)
+		if (adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
 
 		if (is_support_sw_smu(adev))
@@ -472,7 +471,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 	if (!data || !size)
 		return -EINVAL;
 
-	if (pp_funcs && pp_funcs->read_sensor) {
+	if (pp_funcs->read_sensor) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
 					    sensor,
@@ -719,8 +718,7 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 		return;
 
 	mutex_lock(&adev->pm.mutex);
-	if (adev->powerplay.pp_funcs &&
-	    adev->powerplay.pp_funcs->gfx_state_change_set)
+	if (adev->powerplay.pp_funcs->gfx_state_change_set)
 		((adev)->powerplay.pp_funcs->gfx_state_change_set(
 			(adev)->powerplay.pp_handle, state));
 	mutex_unlock(&adev->pm.mutex);
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 10/12] drm/amd/pm: drop nonsense !smu->ppt_funcs check
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (7 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 09/12] drm/amd/pm: drop redundant !pp_funcs check Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 11/12] drm/amd/pm: drop extra non-necessary null pointers checks Evan Quan
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Since the "smu->ppt_funcs" was already well installed at early_init phase,
the checks afterwards make nonsense.
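
With the redundant check dropped, the smu_ppt_funcs() dispatch macro
reduces to a single callback test. As an illustration, a wrapper such as
smu_init_microcode(smu) now expands to (sketch):

	((smu)->ppt_funcs->init_microcode ?
		(smu)->ppt_funcs->init_microcode(smu) : 0)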

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I07a945035a87b23032e4911bba768edacbd5e65a
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c   | 20 +++++++++-----------
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h |  2 +-
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 27a453fb4db7..3773e95a18bf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -934,7 +934,7 @@ static void smu_interrupt_work_fn(struct work_struct *work)
 	struct smu_context *smu = container_of(work, struct smu_context,
 					       interrupt_work);
 
-	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
+	if (smu->ppt_funcs->interrupt_work)
 		smu->ppt_funcs->interrupt_work(smu);
 }
 
@@ -1782,7 +1782,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
 		return -EINVAL;
 	}
 
-	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
+	if (smu->ppt_funcs->force_clk_levels) {
 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
@@ -1845,8 +1845,7 @@ static int smu_set_mp1_state(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (smu->ppt_funcs &&
-	    smu->ppt_funcs->set_mp1_state)
+	if (smu->ppt_funcs->set_mp1_state)
 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
 	return ret;
@@ -1858,7 +1857,7 @@ static int smu_set_df_cstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+	if (!smu->ppt_funcs->set_df_cstate)
 		return 0;
 
 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
@@ -1872,7 +1871,7 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	int ret = 0;
 
-	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+	if (!smu->ppt_funcs->allow_xgmi_power_down)
 		return 0;
 
 	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
@@ -2510,7 +2509,7 @@ static int smu_get_baco_capability(void *handle, bool *cap)
 
 	*cap = false;
 
-	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
+	if (smu->ppt_funcs->baco_is_support)
 		*cap = smu->ppt_funcs->baco_is_support(smu);
 
 	return 0;
@@ -2542,7 +2541,7 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
 {
 	bool ret = false;
 
-	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
+	if (smu->ppt_funcs->mode1_reset_is_support)
 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
 
 	return ret;
@@ -2667,8 +2666,7 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
 {
 	int ret = -EOPNOTSUPP;
 
-	if (smu->ppt_funcs &&
-		smu->ppt_funcs->get_ecc_info)
+	if (smu->ppt_funcs->get_ecc_info)
 		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
 
 	return ret;
@@ -2881,7 +2879,7 @@ int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
 {
 	int ret = 0;
 
-	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
+	if (smu->ppt_funcs->send_hbm_bad_pages_num)
 		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 5f21ead860f9..a91967b31eeb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -28,7 +28,7 @@
 #if defined(SWSMU_CODE_LAYER_L1)
 
 #define smu_ppt_funcs(intf, ret, smu, args...) \
-	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? (smu)->ppt_funcs->intf(smu, ##args) : ret) : -EINVAL)
+	((smu)->ppt_funcs->intf ? (smu)->ppt_funcs->intf(smu, ##args) : ret)
 
 #define smu_init_microcode(smu)						smu_ppt_funcs(init_microcode, 0, smu)
 #define smu_fini_microcode(smu)						smu_ppt_funcs(fini_microcode, 0, smu)
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 11/12] drm/amd/pm: drop extra non-necessary null pointers checks
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (8 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 10/12] drm/amd/pm: drop nonsense !smu->ppt_funcs check Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11  7:52 ` [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset Evan Quan
  2022-02-11  7:55 ` [PATCH 01/12] drm/amd/pm: drop unused structure members Christian König
  11 siblings, 0 replies; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

They are totally redundant: the checks performed before them already
guarantee those pointers cannot be NULL (see the sketch below).
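
One concrete case (a sketch of the assumed lifecycle, not new code): the
handle passed into every pp_* callback is the hwmgr allocated by
amd_powerplay_create() during early init, so by the time any callback can
run it is guaranteed to be non-NULL:

	/* amd_powerplay_create(), simplified: */
	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;	/* creation fails, callbacks never exposed */
	/* ... remaining hwmgr fields initialized ... */
	adev->powerplay.pp_handle = hwmgr;	/* the only handle handed out */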

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I9f31734f49a8093582fc321ef3d93233946006e3
---
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 182 ++----------------
 .../amd/pm/powerplay/hwmgr/hardwaremanager.c  |  42 ----
 .../gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c    |  17 +-
 .../amd/pm/powerplay/hwmgr/processpptables.c  |   2 +-
 .../gpu/drm/amd/pm/powerplay/smumgr/smumgr.c  |   6 +-
 5 files changed, 22 insertions(+), 227 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index e95893556147..81ec5464b679 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -40,9 +40,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 {
 	struct pp_hwmgr *hwmgr;
 
-	if (adev == NULL)
-		return -EINVAL;
-
 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
 	if (hwmgr == NULL)
 		return -ENOMEM;
@@ -281,7 +278,7 @@ static int pp_dpm_load_fw(void *handle)
 	if (!hwmgr->pm_en)
 		return -EOPNOTSUPP;
 
-	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
+	if (!hwmgr->smumgr_funcs->start_smu)
 		return -EINVAL;
 
 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
@@ -301,9 +298,6 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -341,9 +335,6 @@ static int pp_dpm_force_performance_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (level == hwmgr->dpm_level)
 		return 0;
 
@@ -359,9 +350,6 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	return hwmgr->dpm_level;
 }
 
@@ -369,9 +357,6 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return 0;
-
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -383,9 +368,6 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return 0;
-
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -397,9 +379,6 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return;
-
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
@@ -411,9 +390,6 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return;
-
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
@@ -426,9 +402,6 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	return hwmgr_handle_task(hwmgr, task_id, user_state);
 }
 
@@ -438,7 +411,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 	struct pp_power_state *state;
 	enum amd_pm_state_type pm_type;
 
-	if (!hwmgr || !hwmgr->current_ps)
+	if (!hwmgr->current_ps)
 		return -EINVAL;
 
 	state = hwmgr->current_ps;
@@ -468,9 +441,6 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
 		return -EOPNOTSUPP;
 
@@ -486,9 +456,6 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
 		return -EOPNOTSUPP;
 
@@ -503,9 +470,6 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
 		return -EOPNOTSUPP;
 
@@ -519,9 +483,6 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
 		return -EOPNOTSUPP;
 
@@ -535,9 +496,6 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 		return -EOPNOTSUPP;
 
@@ -551,9 +509,6 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EOPNOTSUPP;
-
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
 		return -EOPNOTSUPP;
 
@@ -571,7 +526,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
 
 	memset(data, 0, sizeof(*data));
 
-	if (!hwmgr || !hwmgr->ps)
+	if (!hwmgr->ps)
 		return -EINVAL;
 
 	data->nums = hwmgr->num_ps;
@@ -603,7 +558,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->soft_pp_table)
+	if (!hwmgr->soft_pp_table)
 		return -EINVAL;
 
 	*table = (char *)hwmgr->soft_pp_table;
@@ -631,9 +586,6 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = -ENOMEM;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hardcode_pp_table) {
 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 						   hwmgr->soft_pp_table_size,
@@ -661,9 +613,6 @@ static int pp_dpm_force_clock_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -682,9 +631,6 @@ static int pp_dpm_print_clock_levels(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -696,9 +642,6 @@ static int pp_dpm_get_sclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -710,9 +653,6 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -725,9 +665,6 @@ static int pp_dpm_get_mclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -739,9 +676,6 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -754,7 +688,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !value)
+	if (!value)
 		return -EINVAL;
 
 	switch (idx) {
@@ -780,9 +714,6 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return NULL;
-
 	if (idx < hwmgr->num_vce_state_tables)
 		return &hwmgr->vce_states[idx];
 	return NULL;
@@ -792,7 +723,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
+	if (!hwmgr->hwmgr_func->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -804,7 +735,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
+	if (!hwmgr->hwmgr_func->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -819,9 +750,6 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
 		return 0;
 
@@ -832,9 +760,6 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -847,9 +772,6 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_mp1_state)
 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
 
@@ -863,9 +785,6 @@ static int pp_dpm_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -903,9 +822,6 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	struct pp_hwmgr *hwmgr = handle;
 	uint32_t max_power_limit;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -935,7 +851,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !limit)
+	if (!limit)
 		return -EINVAL;
 
 	if (power_type != PP_PWR_TYPE_SUSTAINED)
@@ -968,9 +884,6 @@ static int pp_display_configuration_change(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	phm_store_dal_configuration_data(hwmgr, display_config);
 	return 0;
 }
@@ -980,7 +893,7 @@ static int pp_get_display_power_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !output)
+	if (!output)
 		return -EINVAL;
 
 	return phm_get_dal_power_level(hwmgr, output);
@@ -994,9 +907,6 @@ static int pp_get_current_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	phm_get_dal_power_level(hwmgr, &simple_clocks);
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -1038,9 +948,6 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (clocks == NULL)
 		return -EINVAL;
 
@@ -1053,7 +960,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !clocks)
+	if (!clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
@@ -1065,7 +972,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !clocks)
+	if (!clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
@@ -1076,7 +983,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !clock_ranges)
+	if (!clock_ranges)
 		return -EINVAL;
 
 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
@@ -1088,7 +995,7 @@ static int pp_display_clock_voltage_request(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !clock)
+	if (!clock)
 		return -EINVAL;
 
 	return phm_display_clock_voltage_request(hwmgr, clock);
@@ -1100,7 +1007,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !clocks)
+	if (!clocks)
 		return -EINVAL;
 
 	clocks->level = PP_DAL_POWERLEVEL_7;
@@ -1115,9 +1022,6 @@ static int pp_dpm_powergate_mmhub(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -1130,9 +1034,6 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return 0;
-
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return 0;
@@ -1145,9 +1046,6 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return;
-
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
@@ -1160,9 +1058,6 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return;
-
 	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return;
@@ -1211,9 +1106,6 @@ static int pp_notify_smu_enable_pwe(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1228,9 +1120,6 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
 		return 0;
 
@@ -1243,9 +1132,6 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
 		pr_debug("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1260,9 +1146,6 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
 		pr_debug("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1277,9 +1160,6 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
 		pr_debug("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1294,9 +1174,6 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	return phm_set_active_display_count(hwmgr, count);
 }
 
@@ -1305,8 +1182,6 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
 	struct pp_hwmgr *hwmgr = handle;
 
 	*cap = false;
-	if (!hwmgr)
-		return -EINVAL;
 
 	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
 		return 0;
@@ -1320,9 +1195,6 @@ static int pp_get_asic_baco_state(void *handle, int *state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hwmgr_func->get_asic_baco_state)
 		return 0;
 
@@ -1335,9 +1207,6 @@ static int pp_set_asic_baco_state(void *handle, int state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hwmgr_func->set_asic_baco_state)
 		return 0;
 
@@ -1350,7 +1219,7 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !buf)
+	if (!buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
@@ -1365,9 +1234,6 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1380,9 +1246,6 @@ static int pp_asic_reset_mode_2(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1395,9 +1258,6 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1410,9 +1270,6 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hwmgr_func->set_df_cstate)
 		return 0;
 
@@ -1425,9 +1282,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
 		return 0;
 
@@ -1440,9 +1294,6 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->hwmgr_func->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
@@ -1453,9 +1304,6 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
 		pr_info_ratelimited("%s was not implemented.\n", __func__);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 981dc8c7112d..f1d6b4b174b6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -30,16 +30,8 @@
 #define TEMP_RANGE_MIN (0)
 #define TEMP_RANGE_MAX (80 * 1000)
 
-#define PHM_FUNC_CHECK(hw) \
-	do {							\
-		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
-			return -EINVAL;				\
-	} while (0)
-
 int phm_setup_asic(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
-
 	if (NULL != hwmgr->hwmgr_func->asic_setup)
 		return hwmgr->hwmgr_func->asic_setup(hwmgr);
 
@@ -48,8 +40,6 @@ int phm_setup_asic(struct pp_hwmgr *hwmgr)
 
 int phm_power_down_asic(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
-
 	if (NULL != hwmgr->hwmgr_func->power_off_asic)
 		return hwmgr->hwmgr_func->power_off_asic(hwmgr);
 
@@ -62,7 +52,6 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 {
 	struct phm_set_power_state_input states;
 
-	PHM_FUNC_CHECK(hwmgr);
 
 	states.pcurrent_state = pcurrent_state;
 	states.pnew_state = pnew_power_state;
@@ -77,7 +66,6 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = NULL;
 	int ret = -EINVAL;
-	PHM_FUNC_CHECK(hwmgr);
 	adev = hwmgr->adev;
 
 	/* Skip for suspend/resume case */
@@ -98,7 +86,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
 	int ret = -EINVAL;
 
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->not_vf)
 		return 0;
@@ -118,7 +105,6 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
 {
 	int ret = 0;
 
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
 		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
@@ -130,7 +116,6 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				   struct pp_power_state *adjusted_ps,
 			     const struct pp_power_state *current_ps)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
 		return hwmgr->hwmgr_func->apply_state_adjust_rules(
@@ -142,7 +127,6 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
 int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
 		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
@@ -151,7 +135,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
 
 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
 		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
@@ -161,7 +144,6 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 
 int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
 		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
@@ -171,7 +153,6 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
 
 int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
 		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
@@ -182,7 +163,6 @@ int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
 
 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (NULL != hwmgr->hwmgr_func->display_config_changed)
 		hwmgr->hwmgr_func->display_config_changed(hwmgr);
@@ -192,7 +172,6 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
 
 int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
 			hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
@@ -202,7 +181,6 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 
 int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->not_vf)
 		return 0;
@@ -215,7 +193,6 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 
 int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
 		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);
@@ -272,10 +249,6 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
 
 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr == NULL ||
-	    hwmgr->hwmgr_func == NULL)
-		return false;
-
 	if (hwmgr->pp_one_vf)
 		return false;
 
@@ -291,7 +264,6 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr,
 				 const struct pp_hw_power_state *pstate2,
 				 bool *equal)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->check_states_equal == NULL)
 		return -EINVAL;
@@ -305,7 +277,6 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 	int index = 0;
 	int number_of_active_display = 0;
 
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (display_config == NULL)
 		return -EINVAL;
@@ -339,7 +310,6 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
 		struct amd_pp_simple_clock_info *info)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
 		return -EINVAL;
@@ -348,7 +318,6 @@ int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
 
 int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
 		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);
@@ -361,7 +330,6 @@ int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_s
 				PHM_PerformanceLevelDesignation designation, uint32_t index,
 				PHM_PerformanceLevel *level)
 {
-	PHM_FUNC_CHECK(hwmgr);
 	if (hwmgr->hwmgr_func->get_performance_level == NULL)
 		return -EINVAL;
 
@@ -386,7 +354,6 @@ int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *s
 	int result;
 	PHM_PerformanceLevel performance_level = {0};
 
-	PHM_FUNC_CHECK(hwmgr);
 
 	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
 	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);
@@ -415,7 +382,6 @@ int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *s
 
 int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
 		return -EINVAL;
@@ -426,7 +392,6 @@ int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp
 
 int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
 		return -EINVAL;
@@ -439,7 +404,6 @@ int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 		enum amd_pp_clock_type type,
 		struct pp_clock_levels_with_latency *clocks)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
 		return -EINVAL;
@@ -452,7 +416,6 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 		enum amd_pp_clock_type type,
 		struct pp_clock_levels_with_voltage *clocks)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
 		return -EINVAL;
@@ -464,7 +427,6 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 					void *clock_ranges)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
 		return -EINVAL;
@@ -476,7 +438,6 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 		struct pp_display_clock_request *clock)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
 		return -EINVAL;
@@ -486,7 +447,6 @@ int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 
 int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
 		return -EINVAL;
@@ -496,7 +456,6 @@ int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_i
 
 int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->not_vf)
 		return 0;
@@ -509,7 +468,6 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
 
 int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
 {
-	PHM_FUNC_CHECK(hwmgr);
 
 	if (!hwmgr->hwmgr_func->set_active_display_count)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index c0c2f36094fa..10f9b8fb93f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -79,9 +79,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	hwmgr->pp_table_version = PP_TABLE_V1;
 	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -195,7 +192,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 
 int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
 {
-	if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
+	if (!hwmgr->smumgr_funcs->smu_init)
 		return -EINVAL;
 
 	phm_register_irq_handlers(hwmgr);
@@ -207,7 +204,7 @@ int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
 
 int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
+	if (hwmgr->smumgr_funcs->smu_fini)
 		hwmgr->smumgr_funcs->smu_fini(hwmgr);
 
 	return 0;
@@ -275,7 +272,7 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 {
-	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
+	if (!hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
 	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
@@ -297,7 +294,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
+	if (!hwmgr->pm_en || !hwmgr->not_vf)
 		return 0;
 
 	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
@@ -318,9 +315,6 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (!hwmgr)
-		return -EINVAL;
-
 	if (!hwmgr->not_vf || !hwmgr->pm_en)
 		return 0;
 
@@ -362,9 +356,6 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
-		return -EINVAL;
-
 	switch (task_id) {
 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
 		if (!hwmgr->not_vf)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index 182118e3fd5f..991fc4633e0b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -1484,7 +1484,7 @@ static int get_cac_leakage_table(struct pp_hwmgr *hwmgr,
 	struct phm_cac_leakage_table  *cac_leakage_table;
 	unsigned long i;
 
-	if (!hwmgr || !table || !ptable)
+	if (!table || !ptable)
 		return -EINVAL;
 
 	cac_leakage_table = kzalloc(struct_size(cac_leakage_table, entries, table->ucNumEntries),
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
index b6921db3c130..f2ec9282aff6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
@@ -131,8 +131,7 @@ int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL ||
-	    hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+	if (hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
 	    (resp && !hwmgr->smumgr_funcs->get_argument))
 		return -EINVAL;
 
@@ -159,8 +158,7 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 {
 	int ret = 0;
 
-	if (hwmgr == NULL ||
-	    hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+	if (hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
 	    (resp && !hwmgr->smumgr_funcs->get_argument))
 		return -EINVAL;
 
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (9 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 11/12] drm/amd/pm: drop extra non-necessary null pointers checks Evan Quan
@ 2022-02-11  7:52 ` Evan Quan
  2022-02-11 13:21   ` Lazar, Lijo
  2022-02-11  7:55 ` [PATCH 01/12] drm/amd/pm: drop unused structure members Christian König
  11 siblings, 1 reply; 23+ messages in thread
From: Evan Quan @ 2022-02-11  7:52 UTC (permalink / raw)
  To: amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, Evan Quan, rui.huang

Instead of having a separate interface for every reset method, replace
them with a single interface which supports all reset methods (usage
sketch below).
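
Callers then name the reset method explicitly. A usage sketch based on
the conversions below (AMD_RESET_METHOD_* are the existing amdgpu
reset-method enum values):

	if (amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO))
		ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
	else
		ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE1);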

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I4c8a7121dd65c2671085673dd7c13cf7e4286f3d
---
 drivers/gpu/drm/amd/amdgpu/aldebaran.c        |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |   4 +-
 drivers/gpu/drm/amd/amdgpu/cik.c              |   4 +-
 drivers/gpu/drm/amd/amdgpu/nv.c               |  13 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c            |  12 +-
 drivers/gpu/drm/amd/amdgpu/vi.c               |   6 +-
 .../gpu/drm/amd/include/kgd_pp_interface.h    |   7 +-
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c           |  89 ++-----------
 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  13 +-
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  |  86 ++++++++----
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 126 +++++++++++-------
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   3 -
 12 files changed, 180 insertions(+), 185 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index a545df4efce1..22b787de313a 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -128,7 +128,7 @@ static int aldebaran_mode2_reset(struct amdgpu_device *adev)
 {
 	/* disable BM */
 	pci_clear_master(adev->pdev);
-	adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+	adev->asic_reset_res = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
 	return adev->asic_reset_res;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7931132ce6e3..b19bfdf81500 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4504,9 +4504,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 
         amdgpu_device_cache_pci_state(adev->pdev);
 
-        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+        if (amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_MODE1)) {
                 dev_info(adev->dev, "GPU smu mode1 reset\n");
-                ret = amdgpu_dpm_mode1_reset(adev);
+                ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE1);
         } else {
                 dev_info(adev->dev, "GPU psp mode1 reset\n");
                 ret = psp_gpu_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f10ce740a29c..786975716eb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1380,7 +1380,7 @@ static bool cik_asic_supports_baco(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
-		return amdgpu_dpm_is_baco_supported(adev);
+		return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 	default:
 		return false;
 	}
@@ -1434,7 +1434,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 
 	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		dev_info(adev->dev, "BACO reset\n");
-		r = amdgpu_dpm_baco_reset(adev);
+		r = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
 	} else {
 		dev_info(adev->dev, "PCI CONFIG reset\n");
 		r = cik_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 494e17f65fc3..2e590008d3ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -414,7 +414,7 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
 
 	amdgpu_device_cache_pci_state(adev->pdev);
 
-	ret = amdgpu_dpm_mode2_reset(adev);
+	ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
 	if (ret)
 		dev_err(adev->dev, "GPU mode2 reset failed\n");
 
@@ -458,7 +458,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 13):
 		return AMD_RESET_METHOD_MODE1;
 	default:
-		if (amdgpu_dpm_is_baco_supported(adev))
+		if (amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO))
 			return AMD_RESET_METHOD_BACO;
 		else
 			return AMD_RESET_METHOD_MODE1;
@@ -476,7 +476,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 		break;
 	case AMD_RESET_METHOD_BACO:
 		dev_info(adev->dev, "BACO reset\n");
-		ret = amdgpu_dpm_baco_reset(adev);
+		ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
 		break;
 	case AMD_RESET_METHOD_MODE2:
 		dev_info(adev->dev, "MODE2 reset\n");
@@ -641,6 +641,11 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 	return 0;
 }
 
+static bool nv_asic_supports_baco(struct amdgpu_device *adev)
+{
+	return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
+}
+
 static const struct amdgpu_asic_funcs nv_asic_funcs =
 {
 	.read_disabled_bios = &nv_read_disabled_bios,
@@ -657,7 +662,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
 	.need_full_reset = &nv_need_full_reset,
 	.need_reset_on_init = &nv_need_reset_on_init,
 	.get_pcie_replay_count = &nv_get_pcie_replay_count,
-	.supports_baco = &amdgpu_dpm_is_baco_supported,
+	.supports_baco = &nv_asic_supports_baco,
 	.pre_asic_init = &nv_pre_asic_init,
 	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
 	.query_video_codecs = &nv_query_video_codecs,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a216e625c89c..15ee56406bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -508,7 +508,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
 	if (ras && adev->ras_enabled)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
-	ret = amdgpu_dpm_baco_reset(adev);
+	ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
 	if (ret)
 		return ret;
 
@@ -553,7 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 2):
 		if (adev->asic_type == CHIP_VEGA20) {
 			if (adev->psp.sos.fw_version >= 0x80067)
-				baco_reset = amdgpu_dpm_is_baco_supported(adev);
+				baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 			/*
 			 * 1. PMFW version > 0x284300: all cases use baco
 			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
@@ -562,7 +562,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 			    adev->pm.fw_version <= 0x283400)
 				baco_reset = false;
 		} else {
-			baco_reset = amdgpu_dpm_is_baco_supported(adev);
+			baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 		}
 		break;
 	case IP_VERSION(13, 0, 2):
@@ -599,7 +599,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 		return soc15_asic_baco_reset(adev);
 	case AMD_RESET_METHOD_MODE2:
 		dev_info(adev->dev, "MODE2 reset\n");
-		return amdgpu_dpm_mode2_reset(adev);
+		return amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
 	default:
 		dev_info(adev->dev, "MODE1 reset\n");
 		return amdgpu_device_mode1_reset(adev);
@@ -613,10 +613,10 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 2):
 		if (adev->asic_type == CHIP_VEGA20) {
 			if (adev->psp.sos.fw_version >= 0x80067)
-				return amdgpu_dpm_is_baco_supported(adev);
+				return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 			return false;
 		} else {
-			return amdgpu_dpm_is_baco_supported(adev);
+			return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 		}
 		break;
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6645ebbd2696..de510de5e62a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -904,7 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_TOPAZ:
-		return amdgpu_dpm_is_baco_supported(adev);
+		return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 	default:
 		return false;
 	}
@@ -930,7 +930,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_TOPAZ:
-		baco_reset = amdgpu_dpm_is_baco_supported(adev);
+		baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
 		break;
 	default:
 		baco_reset = false;
@@ -962,7 +962,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 
 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		dev_info(adev->dev, "BACO reset\n");
-		r = amdgpu_dpm_baco_reset(adev);
+		r = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
 	} else {
 		dev_info(adev->dev, "PCI CONFIG reset\n");
 		r = vi_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 892648a4a353..8d9c32e70532 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -300,6 +300,7 @@ struct amd_pp_clocks;
 struct pp_smu_wm_range_sets;
 struct pp_smu_nv_clock_table;
 struct dpm_clocks;
+enum amd_reset_method;
 
 struct amd_pm_funcs {
 /* export for dpm on ci and si */
@@ -387,12 +388,10 @@ struct amd_pm_funcs {
 	int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
 	int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
 	int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
-	int (*get_asic_baco_capability)(void *handle, bool *cap);
 	int (*get_asic_baco_state)(void *handle, int *state);
 	int (*set_asic_baco_state)(void *handle, int state);
 	int (*get_ppfeature_status)(void *handle, char *buf);
 	int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
-	int (*asic_reset_mode_2)(void *handle);
 	int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
 	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
 	ssize_t (*get_gpu_metrics)(void *handle, void **table);
@@ -410,6 +409,10 @@ struct amd_pm_funcs {
 	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
 	void (*pm_compute_clocks)(void *handle);
 	bool (*is_smc_alive)(void *handle);
+	int (*is_asic_reset_supported)(void *handle,
+				       enum amd_reset_method reset_method);
+	int (*asic_reset)(void *handle,
+			  enum amd_reset_method reset_method);
 };
 
 struct metrics_table_header {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index f237dd3a3f66..b72945f6a338 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -196,107 +196,42 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 	return ret;
 }
 
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
+				       enum amd_reset_method reset_method)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	void *pp_handle = adev->powerplay.pp_handle;
-	bool baco_cap;
-	int ret = 0;
+	int reset_supported = false;
 
 	if (!amdgpu_dpm_is_smc_alive(adev))
 		return false;
 
-	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+	if (!pp_funcs || !pp_funcs->is_asic_reset_supported)
 		return false;
 
 	mutex_lock(&adev->pm.mutex);
-
-	ret = pp_funcs->get_asic_baco_capability(pp_handle,
-						 &baco_cap);
-
+	reset_supported = pp_funcs->is_asic_reset_supported(adev->powerplay.pp_handle,
+							    reset_method);
 	mutex_unlock(&adev->pm.mutex);
 
-	return ret ? false : baco_cap;
+	return reset_supported;
 }
 
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+int amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
+			  enum amd_reset_method reset_method)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	void *pp_handle = adev->powerplay.pp_handle;
 	int ret = 0;
 
 	if (!amdgpu_dpm_is_smc_alive(adev))
 		return -EOPNOTSUPP;
 
-	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
-		return -ENOENT;
-
-	mutex_lock(&adev->pm.mutex);
-
-	ret = pp_funcs->asic_reset_mode_2(pp_handle);
-
-	mutex_unlock(&adev->pm.mutex);
-
-	return ret;
-}
-
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
-{
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	void *pp_handle = adev->powerplay.pp_handle;
-	int ret = 0;
-
-	if (!amdgpu_dpm_is_smc_alive(adev))
+	if (!pp_funcs || !pp_funcs->asic_reset)
 		return -EOPNOTSUPP;
 
-	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
-		return -ENOENT;
-
 	mutex_lock(&adev->pm.mutex);
-
-	/* enter BACO state */
-	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
-	if (ret)
-		goto out;
-
-	/* exit BACO state */
-	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-
-out:
+	ret = pp_funcs->asic_reset(adev->powerplay.pp_handle,
+				   reset_method);
 	mutex_unlock(&adev->pm.mutex);
-	return ret;
-}
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
-{
-	struct smu_context *smu = adev->powerplay.pp_handle;
-	bool support_mode1_reset = false;
-
-	if (!amdgpu_dpm_is_smc_alive(adev))
-		return false;
-
-	if (is_support_sw_smu(adev)) {
-		mutex_lock(&adev->pm.mutex);
-		support_mode1_reset = smu_mode1_reset_is_support(smu);
-		mutex_unlock(&adev->pm.mutex);
-	}
-
-	return support_mode1_reset;
-}
-
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
-{
-	struct smu_context *smu = adev->powerplay.pp_handle;
-	int ret = -EOPNOTSUPP;
-
-	if (!amdgpu_dpm_is_smc_alive(adev))
-		return -EOPNOTSUPP;
-
-	if (is_support_sw_smu(adev)) {
-		mutex_lock(&adev->pm.mutex);
-		ret = smu_mode1_reset(smu);
-		mutex_unlock(&adev->pm.mutex);
-	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 49488aebd350..bda8b8149497 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -374,15 +374,6 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 				    enum PP_SMC_POWER_PROFILE type,
 				    bool en);
 
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
-
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
-
 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 			     enum pp_mp1_state mp1_state);
 
@@ -542,4 +533,8 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
 						  unsigned int *num_states);
 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
 				   struct dpm_clocks *clock_table);
+int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
+				       enum amd_reset_method reset_method);
+int amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
+			  enum amd_reset_method reset_method);
 #endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 81ec5464b679..3edc05296e01 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1177,20 +1177,6 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
 	return phm_set_active_display_count(hwmgr, count);
 }
 
-static int pp_get_asic_baco_capability(void *handle, bool *cap)
-{
-	struct pp_hwmgr *hwmgr = handle;
-
-	*cap = false;
-
-	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
-		return 0;
-
-	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
-
-	return 0;
-}
-
 static int pp_get_asic_baco_state(void *handle, int *state)
 {
 	struct pp_hwmgr *hwmgr = handle;
@@ -1242,18 +1228,6 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
 }
 
-static int pp_asic_reset_mode_2(void *handle)
-{
-	struct pp_hwmgr *hwmgr = handle;
-
-	if (hwmgr->hwmgr_func->asic_reset == NULL) {
-		pr_info_ratelimited("%s was not implemented.\n", __func__);
-		return -EINVAL;
-	}
-
-	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
-}
-
 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
 	struct pp_hwmgr *hwmgr = handle;
@@ -1394,6 +1368,62 @@ static bool pp_is_smc_alive(void *handle)
 	return false;
 }
 
+static int pp_is_asic_reset_supported(void *handle,
+				       enum amd_reset_method reset_method)
+{
+	struct pp_hwmgr *hwmgr = handle;
+	bool reset_supported = false;
+
+	switch (reset_method) {
+	case AMD_RESET_METHOD_BACO:
+		if (hwmgr->hwmgr_func->get_asic_baco_capability)
+			hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr,
+								    &reset_supported);
+		break;
+	case AMD_RESET_METHOD_MODE1:
+	case AMD_RESET_METHOD_MODE2:
+	default:
+		break;
+	}
+
+	return reset_supported;
+}
+
+static int pp_asic_reset(void *handle,
+			 enum amd_reset_method reset_method)
+{
+	struct pp_hwmgr *hwmgr = handle;
+	int ret = 0;
+
+	switch (reset_method) {
+	case AMD_RESET_METHOD_MODE1:
+		return -EOPNOTSUPP;
+	case AMD_RESET_METHOD_MODE2:
+		if (!hwmgr->hwmgr_func->asic_reset)
+			return -EOPNOTSUPP;
+
+		ret = hwmgr->hwmgr_func->asic_reset(hwmgr,
+						    SMU_ASIC_RESET_MODE_2);
+		break;
+	case AMD_RESET_METHOD_BACO:
+		if (!hwmgr->hwmgr_func->set_asic_baco_state)
+			return -EOPNOTSUPP;
+
+		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
+							     BACO_STATE_IN);
+		if (ret)
+			return ret;
+
+		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
+							     BACO_STATE_OUT);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1446,12 +1476,10 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
 	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
 	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
-	.get_asic_baco_capability = pp_get_asic_baco_capability,
 	.get_asic_baco_state = pp_get_asic_baco_state,
 	.set_asic_baco_state = pp_set_asic_baco_state,
 	.get_ppfeature_status = pp_get_ppfeature_status,
 	.set_ppfeature_status = pp_set_ppfeature_status,
-	.asic_reset_mode_2 = pp_asic_reset_mode_2,
 	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
 	.set_df_cstate = pp_set_df_cstate,
 	.set_xgmi_pstate = pp_set_xgmi_pstate,
@@ -1460,4 +1488,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
 	.pm_compute_clocks = pp_pm_compute_clocks,
 	.is_smc_alive = pp_is_smc_alive,
+	.is_asic_reset_supported = pp_is_asic_reset_supported,
+	.asic_reset = pp_asic_reset,
 };
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 3773e95a18bf..bab5ddc667f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2503,18 +2503,6 @@ static int smu_set_xgmi_pstate(void *handle,
 	return ret;
 }
 
-static int smu_get_baco_capability(void *handle, bool *cap)
-{
-	struct smu_context *smu = handle;
-
-	*cap = false;
-
-	if (smu->ppt_funcs->baco_is_support)
-		*cap = smu->ppt_funcs->baco_is_support(smu);
-
-	return 0;
-}
-
 static int smu_baco_set_state(void *handle, int state)
 {
 	struct smu_context *smu = handle;
@@ -2537,40 +2525,6 @@ static int smu_baco_set_state(void *handle, int state)
 	return ret;
 }
 
-bool smu_mode1_reset_is_support(struct smu_context *smu)
-{
-	bool ret = false;
-
-	if (smu->ppt_funcs->mode1_reset_is_support)
-		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
-
-	return ret;
-}
-
-int smu_mode1_reset(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (smu->ppt_funcs->mode1_reset)
-		ret = smu->ppt_funcs->mode1_reset(smu);
-
-	return ret;
-}
-
-static int smu_mode2_reset(void *handle)
-{
-	struct smu_context *smu = handle;
-	int ret = 0;
-
-	if (smu->ppt_funcs->mode2_reset)
-		ret = smu->ppt_funcs->mode2_reset(smu);
-
-	if (ret)
-		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
-
-	return ret;
-}
-
 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
 						struct pp_smu_nv_clock_table *max_clocks)
 {
@@ -2705,6 +2659,82 @@ static bool smu_is_smc_alive(void *handle)
 	return false;
 }
 
+static int smu_is_asic_reset_supported(void *handle,
+				       enum amd_reset_method reset_method)
+{
+	struct smu_context *smu = handle;
+	struct amdgpu_device *adev = smu->adev;
+	int reset_supported = false;
+
+	switch (reset_method) {
+	case AMD_RESET_METHOD_MODE1:
+		if (smu->ppt_funcs->mode1_reset_is_support)
+			reset_supported = smu->ppt_funcs->mode1_reset_is_support(smu);
+		break;
+	case AMD_RESET_METHOD_MODE2:
+		switch (adev->ip_versions[MP1_HWIP][0]) {
+		case IP_VERSION(11, 5, 0):
+		case IP_VERSION(12, 0, 0):
+		case IP_VERSION(12, 0, 1):
+		case IP_VERSION(13, 0, 2):
+		case IP_VERSION(13, 0, 1):
+		case IP_VERSION(13, 0, 3):
+			reset_supported = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case AMD_RESET_METHOD_BACO:
+		if (smu->ppt_funcs->baco_is_support)
+			reset_supported = smu->ppt_funcs->baco_is_support(smu);
+		break;
+	default:
+		break;
+	}
+
+	return reset_supported;
+}
+
+static int smu_asic_reset(void *handle,
+			  enum amd_reset_method reset_method)
+{
+	struct smu_context *smu = handle;
+	int ret = 0;
+
+	switch (reset_method) {
+	case AMD_RESET_METHOD_MODE1:
+		if (!smu->ppt_funcs->mode1_reset)
+			return -EOPNOTSUPP;
+
+		ret = smu->ppt_funcs->mode1_reset(smu);
+		break;
+	case AMD_RESET_METHOD_MODE2:
+		if (!smu->ppt_funcs->mode2_reset)
+			return -EOPNOTSUPP;
+
+		ret = smu->ppt_funcs->mode2_reset(smu);
+		if (ret)
+			dev_err(smu->adev->dev, "Mode2 reset failed!\n");
+		break;
+	case AMD_RESET_METHOD_BACO:
+		if (!smu->ppt_funcs->baco_enter ||
+		    !smu->ppt_funcs->baco_exit)
+			return -EOPNOTSUPP;
+
+		ret = smu->ppt_funcs->baco_enter(smu);
+		if (ret)
+			return ret;
+
+		ret = smu->ppt_funcs->baco_exit(smu);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
 static const struct amd_pm_funcs swsmu_pm_funcs = {
 	/* export for sysfs */
 	.set_fan_control_mode    = smu_set_fan_control_mode,
@@ -2744,11 +2774,9 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
 	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
 	.set_active_display_count         = smu_set_display_count,
 	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
-	.get_asic_baco_capability         = smu_get_baco_capability,
 	.set_asic_baco_state              = smu_baco_set_state,
 	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
 	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
-	.asic_reset_mode_2                = smu_mode2_reset,
 	.set_df_cstate                    = smu_set_df_cstate,
 	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
 	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
@@ -2759,6 +2787,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
 	.get_dpm_clock_table              = smu_get_dpm_clock_table,
 	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
 	.is_smc_alive = smu_is_smc_alive,
+	.is_asic_reset_supported = smu_is_asic_reset_supported,
+	.asic_reset              = smu_asic_reset,
 };
 
 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index bced761f3f96..ce9cd0522a40 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1392,9 +1392,6 @@ int smu_get_power_limit(void *handle,
 			enum pp_power_limit_level pp_limit_level,
 			enum pp_power_type pp_power_type);
 
-bool smu_mode1_reset_is_support(struct smu_context *smu);
-int smu_mode1_reset(struct smu_context *smu);
-
 extern const struct amd_ip_funcs smu_ip_funcs;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
-- 
2.29.0


^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [PATCH 01/12] drm/amd/pm: drop unused structure members
  2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
                   ` (10 preceding siblings ...)
  2022-02-11  7:52 ` [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset Evan Quan
@ 2022-02-11  7:55 ` Christian König
  11 siblings, 0 replies; 23+ messages in thread
From: Christian König @ 2022-02-11  7:55 UTC (permalink / raw)
  To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, Lijo.Lazar, rui.huang

Nice cleanup.

Can't say much about the rest, but this patch and patch #2 are 
Reviewed-by: Christian König <christian.koenig@amd.com>

Regards,
Christian.

Am 11.02.22 um 08:51 schrieb Evan Quan:
> Drop those members which get never used.
>
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: Iec70ad1dfe2059be26843f378588e6c894e9cae8
> ---
>   drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 2 --
>   1 file changed, 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index fbef3ab8d487..fb32846a2d0e 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -373,8 +373,6 @@ struct smu_dpm_context {
>   };
>   
>   struct smu_power_gate {
> -	bool uvd_gated;
> -	bool vce_gated;
>   	atomic_t vcn_gated;
>   	atomic_t jpeg_gated;
>   };


^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-11  7:52 ` [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c Evan Quan
@ 2022-02-11  8:06   ` Chen, Guchun
  2022-02-17  1:53     ` Quan, Evan
  2022-02-11 13:39   ` Lazar, Lijo
  1 sibling, 1 reply; 23+ messages in thread
From: Chen, Guchun @ 2022-02-11  8:06 UTC (permalink / raw)
  To: Quan, Evan, amd-gfx
  Cc: Deucher, Alexander, Lazar, Lijo, Huang, Ray, Quan, Evan

[Public]

mutex_lock(&adev->pm.mutex);
-	ret = smu_write_watermarks_table(smu);
+	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
+							NULL);
 	mutex_unlock(&adev->pm.mutex);

I guess we should separate this change from this patch and address it in another patch.

Regards,
Guchun

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Evan Quan
Sent: Friday, February 11, 2022 3:52 PM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo <Lijo.Lazar@amd.com>; Quan, Evan <Evan.Quan@amd.com>; rui.huang@amd.com
Subject: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c

Instead of performing this check in every instance (framework), it is
more proper to move it into amdgpu_dpm.c. That also keeps the code
clean and tidy.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277 ++++++++++++++++--
 drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
 .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
 .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
 9 files changed, 352 insertions(+), 240 deletions(-)
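
The guard pattern this repeats across amdgpu_dpm.c boils down to the
following standalone sketch (plain userspace C, not code from the
kernel tree; the backend callback, structure names and clock values
are made-up stand-ins, and locking is omitted):

#include <stdbool.h>
#include <stdio.h>

struct backend_funcs_model {
	int (*get_sclk)(void *handle, bool low);
};

struct device_model {
	bool dpm_enabled;	/* stands in for adev->pm.dpm_enabled */
	const struct backend_funcs_model *funcs;
	void *handle;
};

/* Backend implementation no longer carries its own enablement check. */
static int backend_get_sclk(void *handle, bool low)
{
	(void)handle;
	return low ? 300 : 1200;	/* made-up clock values in MHz */
}

/*
 * The wrapper owns the guard, as amdgpu_dpm_get_sclk() does in this
 * patch: when DPM is disabled it bails out before touching the backend.
 */
static int dpm_get_sclk(const struct device_model *dev, bool low)
{
	if (!dev->dpm_enabled)
		return 0;
	if (!dev->funcs || !dev->funcs->get_sclk)
		return 0;
	return dev->funcs->get_sclk(dev->handle, low);
}

int main(void)
{
	const struct backend_funcs_model funcs = {
		.get_sclk = backend_get_sclk,
	};
	struct device_model dev = { .dpm_enabled = true, .funcs = &funcs };

	printf("sclk with DPM enabled:  %d\n", dpm_get_sclk(&dev, false));

	dev.dpm_enabled = false;
	printf("sclk with DPM disabled: %d\n", dpm_get_sclk(&dev, false));

	return 0;
}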

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 2c929fa40379..fff0e6a3882e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_device *adev = ctx->adev;
 	enum amd_dpm_forced_level current_level;
+	int ret = 0;
 
 	if (!ctx)
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
+	if (ret)
+		return ret;
 
 	switch (current_level) {
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9f985bd463be..56144f25b720 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		unsigned i;
 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
 		struct amd_vce_state *vce_state;
+		int ret = 0;
 
 		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
-			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
-			if (vce_state) {
-				vce_clk_table.entries[i].sclk = vce_state->sclk;
-				vce_clk_table.entries[i].mclk = vce_state->mclk;
-				vce_clk_table.entries[i].eclk = vce_state->evclk;
-				vce_clk_table.num_valid_entries++;
-			}
+			ret = amdgpu_dpm_get_vce_clock_state(adev, i, vce_state);
+			if (ret)
+				return ret;
+
+			vce_clk_table.entries[i].sclk = vce_state->sclk;
+			vce_clk_table.entries[i].mclk = vce_state->mclk;
+			vce_clk_table.entries[i].eclk = vce_state->evclk;
+			vce_clk_table.num_valid_entries++;
 		}
 
 		return copy_to_user(out, &vce_clk_table,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 1d63f1e8884c..b46ae0063047 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!pp_funcs->get_sclk)
 		return 0;
 
@@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!pp_funcs->get_mclk)
 		return 0;
 
@@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
 
+	if (!adev->pm.dpm_enabled) {
+		dev_WARN(adev->dev,
+			 "SMU uninitialized but power %s requested for %u!\n",
+			 gate ? "gate" : "ungate", block_type);
+		return -EOPNOTSUPP;
+	}
+
 	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
 		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
 				block_type, gate ? "gate" : "ungate");
@@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
@@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
@@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_df_cstate) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
@@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		ret = smu_allow_xgmi_power_down(smu, en);
@@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 			adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
@@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
 			adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
@@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 			adev->powerplay.pp_funcs;
 	int ret = -EOPNOTSUPP;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
 		mutex_lock(&adev->pm.mutex);
 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
@@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = -EINVAL;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!data || !size)
 		return -EINVAL;
 
@@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
@@ -500,6 +540,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&adev->pm.mutex);
 	ret = smu_send_hbm_bad_pages_num(smu, size);
 	mutex_unlock(&adev->pm.mutex);
@@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 {
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (type != PP_SCLK)
 		return -EINVAL;
 
@@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (type != PP_SCLK)
 		return -EINVAL;
 
@@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 
 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 {
-	struct smu_context *smu = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return 0;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = smu_write_watermarks_table(smu);
+	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
+							NULL);
 	mutex_unlock(&adev->pm.mutex);
 
 	return ret;
@@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
@@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
@@ -605,6 +664,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
 	if (!is_support_sw_smu(adev))
 		return 0;
 
@@ -619,6 +681,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 				 enum gfx_change_state state)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 	if (adev->powerplay.pp_funcs &&
 	    adev->powerplay.pp_funcs->gfx_state_change_set)
@@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
 	return smu_get_ecc_info(smu, umc_ecc);
 }
 
-struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
-						     uint32_t idx)
+int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+				   uint32_t idx,
+				   struct amd_vce_state *vstate)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	struct amd_vce_state *vstate = NULL;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_vce_clock_state)
-		return NULL;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
 					       idx);
 	mutex_unlock(&adev->pm.mutex);
 
-	return vstate;
+	return 0;
 }
 
 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
@@ -660,6 +731,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 
 	if (!pp_funcs->get_current_power_state) {
@@ -679,6 +753,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 				enum amd_pm_state_type state)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&adev->pm.mutex);
 	adev->pm.dpm.user_state = state;
 	mutex_unlock(&adev->pm.mutex);
@@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 		amdgpu_dpm_compute_clocks(adev);
 }
 
-enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
+int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
+				     enum amd_dpm_forced_level *level)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	enum amd_dpm_forced_level level;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->get_performance_level)
-		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
+		*level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
 	else
-		level = adev->pm.dpm.forced_level;
+		*level = adev->pm.dpm.forced_level;
 	mutex_unlock(&adev->pm.mutex);
 
-	return level;
+	return 0;
 }
 
 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
@@ -717,13 +797,16 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->force_performance_level)
 		return 0;
 
 	if (adev->pm.dpm.thermal_active)
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	amdgpu_dpm_get_performance_level(adev, &current_level);
 	if (current_level == level)
 		return 0;
 
@@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_pp_num_states)
 		return -EOPNOTSUPP;
 
@@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->dispatch_tasks)
 		return -EOPNOTSUPP;
 
@@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_pp_table)
 		return 0;
 
@@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fine_grain_clk_vol)
 		return 0;
 
@@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->odn_edit_dpm_table)
 		return 0;
 
@@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->print_clock_levels)
 		return 0;
 
@@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_ppfeature_status)
 		return 0;
 
@@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_ppfeature_status)
 		return 0;
 
@@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->force_clock_level)
 		return 0;
 
@@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 	return ret;
 }
 
-int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
+			   uint32_t *value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	int ret = 0;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_sclk_od)
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
+	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
 	mutex_unlock(&adev->pm.mutex);
 
-	return ret;
+	return 0;
 }
 
 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev))
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->set_sclk_od)
@@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
 	return 0;
 }
 
-int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
+			   uint32_t *value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	int ret = 0;
+
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
 
 	if (!pp_funcs->get_mclk_od)
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
-	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
+	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
 	mutex_unlock(&adev->pm.mutex);
 
-	return ret;
+	return 0;
 }
 
 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (is_support_sw_smu(adev))
-		return 0;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&adev->pm.mutex);
 	if (pp_funcs->set_mclk_od)
@@ -1043,6 +1165,9 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
 
@@ -1060,6 +1185,9 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_power_profile_mode)
 		return 0;
 
@@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_gpu_metrics)
 		return 0;
 
@@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_power_limit)
 		return -ENODATA;
 
@@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_power_limit)
 		return -EINVAL;
 
@@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
 {
 	bool cclk_dpm_supported = false;
 
+	if (!adev->pm.dpm_enabled)
+		return false;
+
 	if (!is_support_sw_smu(adev))
 		return false;
 
@@ -1247,6 +1405,9 @@ int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *ade
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->debugfs_print_current_performance_level)
 		return -EOPNOTSUPP;
 
@@ -1265,6 +1426,9 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_smu_prv_buf_details)
 		return -ENOSYS;
 
@@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return false;
+
 	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
 	    (is_support_sw_smu(adev) && smu->is_apu) ||
 		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
@@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_pp_table)
 		return -EOPNOTSUPP;
 
@@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	if (!adev->pm.dpm_enabled)
+		return INT_MAX;
+
 	if (!is_support_sw_smu(adev))
 		return INT_MAX;
 
@@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
 
 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
 {
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!is_support_sw_smu(adev))
 		return;
 
@@ -1333,6 +1509,9 @@ int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_configuration_change)
 		return 0;
 
@@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type)
 		return 0;
 
@@ -1369,6 +1551,9 @@ int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_display_mode_validation_clocks)
 		return 0;
 
@@ -1387,6 +1572,9 @@ int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type_with_latency)
 		return 0;
 
@@ -1406,6 +1594,9 @@ int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_clock_by_type_with_voltage)
 		return 0;
 
@@ -1424,6 +1615,9 @@ int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_watermarks_for_clocks_ranges)
 		return -EOPNOTSUPP;
 
@@ -1441,6 +1635,9 @@ int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_clock_voltage_request)
 		return -EOPNOTSUPP;
 
@@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_current_clocks)
 		return -EOPNOTSUPP;
 
@@ -1473,6 +1673,9 @@ void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->notify_smu_enable_pwe)
 		return;
 
@@ -1487,6 +1690,9 @@ int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_active_display_count)
 		return -EOPNOTSUPP;
 
@@ -1504,6 +1710,9 @@ int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->set_min_deep_sleep_dcefclk)
 		return -EOPNOTSUPP;
 
@@ -1520,6 +1729,9 @@ void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
 		return;
 
@@ -1534,6 +1746,9 @@ void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!adev->pm.dpm_enabled)
+		return;
+
 	if (!pp_funcs->set_hard_min_fclk_by_freq)
 		return;
 
@@ -1549,6 +1764,9 @@ int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->display_disable_memory_clock_switch)
 		return 0;
 
@@ -1566,6 +1784,9 @@ int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
 		return -EOPNOTSUPP;
 
@@ -1584,6 +1805,9 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_uclk_dpm_states)
 		return -EOPNOTSUPP;
 
@@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
+	if (!adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
 	if (!pp_funcs->get_dpm_clock_table)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b0243068212b..84aab3bb9bdc 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -273,11 +273,14 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 		return ret;
 	}
 
-	level = amdgpu_dpm_get_performance_level(adev);
+	ret = amdgpu_dpm_get_performance_level(adev, &level);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%s\n",
 			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 		return ret;
 	}
 
-	value = amdgpu_dpm_get_sclk_od(adev);
+	ret = amdgpu_dpm_get_sclk_od(adev, &value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%d\n", value);
 }
 
@@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		return ret;
 	}
 
-	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 
@@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 		return ret;
 	}
 
-	value = amdgpu_dpm_get_mclk_od(adev);
+	ret = amdgpu_dpm_get_mclk_od(adev, &value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return sysfs_emit(buf, "%d\n", value);
 }
 
@@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		return ret;
 	}
 
-	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index ddfa55b59d02..49488aebd350 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
 				 enum gfx_change_state state);
 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
 			    void *umc_ecc);
-struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
-						     uint32_t idx);
+int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+				   uint32_t idx,
+				   struct amd_vce_state *vstate);
 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
 				enum amd_pm_state_type state);
-enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
+int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
+				     enum amd_dpm_forced_level *level);
 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 				       enum amd_dpm_forced_level level);
 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
@@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
 				 enum pp_clock_type type,
 				 uint32_t mask);
-int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
-int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
 				      char *buf);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 9613c6181c17..59550617cf54 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -959,10 +959,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	int ret;
 	bool equal = false;
 
-	/* if dpm init failed */
-	if (!adev->pm.dpm_enabled)
-		return 0;
-
 	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
 		/* add other state override checks here */
 		if ((!adev->pm.dpm.thermal_active) &&
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 991ac4adb263..bba923cfe08c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
@@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (level == hwmgr->dpm_level)
@@ -353,7 +353,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return hwmgr->dpm_level;
@@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
@@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
@@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -432,7 +432,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 	struct pp_power_state *state;
 	enum amd_pm_state_type pm_type;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->current_ps)
+	if (!hwmgr || !hwmgr->current_ps)
 		return -EINVAL;
 
 	state = hwmgr->current_ps;
@@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
@@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
@@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
@@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
@@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
@@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
@@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
 
 	memset(data, 0, sizeof(*data));
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->ps)
+	if (!hwmgr || !hwmgr->ps)
 		return -EINVAL;
 
 	data->nums = hwmgr->num_ps;
@@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->soft_pp_table)
+	if (!hwmgr || !hwmgr->soft_pp_table)
 		return -EINVAL;
 
 	*table = (char *)hwmgr->soft_pp_table;
@@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = -ENOMEM;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (!hwmgr->hardcode_pp_table) {
@@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
@@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
@@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
@@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
@@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
@@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
@@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !value)
+	if (!hwmgr || !value)
 		return -EINVAL;
 
 	switch (idx) {
@@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return NULL;
 
 	if (idx < hwmgr->num_vce_state_tables)
@@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
+	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
+	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
@@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	struct pp_hwmgr *hwmgr = handle;
 	uint32_t max_power_limit;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
@@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!limit)
+	if (!hwmgr || !limit)
 		return -EINVAL;
 
 	if (power_type != PP_PWR_TYPE_SUSTAINED)
@@ -965,7 +965,7 @@ static int pp_display_configuration_change(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	phm_store_dal_configuration_data(hwmgr, display_config);
@@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!output)
+	if (!hwmgr || !output)
 		return -EINVAL;
 
 	return phm_get_dal_power_level(hwmgr, output);
@@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (clocks == NULL)
@@ -1050,7 +1050,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
@@ -1062,7 +1062,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
@@ -1073,7 +1073,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !clock_ranges)
+	if (!hwmgr || !clock_ranges)
 		return -EINVAL;
 
 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
@@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clock)
+	if (!hwmgr || !clock)
 		return -EINVAL;
 
 	return phm_display_clock_voltage_request(hwmgr, clock);
@@ -1097,7 +1097,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
+	if (!hwmgr || !clocks)
 		return -EINVAL;
 
 	clocks->level = PP_DAL_POWERLEVEL_7;
@@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
@@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return 0;
 
 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
@@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
@@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
@@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
-	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
+	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
 		return 0;
 
 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
@@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
@@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
@@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
@@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	return phm_set_active_display_count(hwmgr, count);
@@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !buf)
+	if (!hwmgr || !buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
@@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
@@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
@@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_df_cstate)
+	if (!hwmgr->hwmgr_func->set_df_cstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
@@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_xgmi_pstate)
+	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
 		return 0;
 
 	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
@@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_gpu_metrics)
+	if (!hwmgr->hwmgr_func->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
@@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
+	if (!hwmgr)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 96a3388c2cb7..97c57a6cf314 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_get_pp_feature_mask(smu, buf);
 }
 
@@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_set_pp_feature_mask(smu, new_mask);
 }
 
@@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled) {
-		dev_WARN(smu->adev->dev,
-			 "SMU uninitialized but power %s requested for %u!\n",
-			 gate ? "gate" : "ungate", block_type);
-		return -EOPNOTSUPP;
-	}
-
 	switch (block_type) {
 	/*
 	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
@@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 	if (!smu->adev->in_suspend)
 		return;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return;
-
 	/* Enable restore flag */
 	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
 
@@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
 	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
 		return -EINVAL;
 
@@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (header->usStructureSize != size) {
 		dev_err(smu->adev->dev, "pp table size not matched !\n");
 		return -EIO;
@@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void *handle,
 	int index = 0;
 	int num_of_active_display = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!display_config)
 		return -EINVAL;
 
@@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context *smu,
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	switch (task_id) {
 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
 		ret = smu_pre_display_config_changed(smu);
@@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
 		return -EINVAL;
 
@@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
 		return -EINVAL;
 
@@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void *handle,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
 		return -EINVAL;
 
@@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle, uint32_t count)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_init_display_count(smu, count);
 }
 
@@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
 		return -EINVAL;
@@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
 		return 0;
 
@@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
 		return 0;
 
@@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 	return ret;
 }
 
-int smu_write_watermarks_table(struct smu_context *smu)
-{
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	return smu_set_watermarks_table(smu, NULL);
-}
-
 static int smu_set_watermarks_for_clock_ranges(void *handle,
 					       struct pp_smu_wm_range_sets *clock_ranges)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->disable_watermark)
 		return 0;
 
@@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	/* controlled by firmware */
 	if (smu->dc_controlled_by_gpio)
 		return 0;
@@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
 	uint32_t limit_type;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	switch(pp_power_type) {
 	case PP_PWR_TYPE_SUSTAINED:
 		limit_type = SMU_DEFAULT_PPT_LIMIT;
@@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
 	uint32_t limit_type = limit >> 24;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	limit &= (1<<24)-1;
 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
 		if (smu->ppt_funcs->set_power_limit)
@@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
 {
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->print_clk_levels)
 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
 
@@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->od_edit_dpm_table) {
 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
 	}
@@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
 	int ret = 0;
 	uint32_t *size, size_val;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!data || !size_arg)
 		return -EINVAL;
 
@@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled ||
-	    !smu->ppt_funcs->get_power_profile_mode)
+	if (!smu->ppt_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
 	if (!buf)
 		return -EINVAL;
@@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void *handle,
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled ||
-	    !smu->ppt_funcs->set_power_profile_mode)
+	if (!smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	return smu_bump_power_profile_mode(smu, param, param_size);
@@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_control_mode)
 		return -EOPNOTSUPP;
 
@@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->set_fan_speed_pwm)
 		return -EOPNOTSUPP;
 
@@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_fan_speed_rpm)
 		return -EOPNOTSUPP;
 
@@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	return smu_set_min_dcef_deep_sleep(smu, clk);
 }
 
@@ -2556,9 +2455,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
 	enum smu_clk_type clk_type;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
 		switch (type) {
 		case amd_pp_sys_clock:
@@ -2590,9 +2486,6 @@ static int smu_display_clock_voltage_request(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->display_clock_voltage_request)
 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
 
@@ -2606,9 +2499,6 @@ static int smu_display_disable_memory_clock_switch(void *handle,
 	struct smu_context *smu = handle;
 	int ret = -EINVAL;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
 
@@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->set_xgmi_pstate)
 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
 
@@ -2722,9 +2609,6 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
 
@@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_uclk_dpm_states)
 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
 
@@ -2752,9 +2633,6 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 	struct smu_context *smu = handle;
 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_current_power_state)
 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
 
@@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->get_dpm_clock_table)
 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
 
@@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
 	struct smu_context *smu = handle;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (!smu->ppt_funcs->get_gpu_metrics)
 		return -EOPNOTSUPP;
 
@@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void *handle)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 39d169440d15..bced761f3f96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_write_watermarks_table(struct smu_context *smu);
 
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max);
-- 
2.29.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset
  2022-02-11  7:52 ` [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset Evan Quan
@ 2022-02-11 13:21   ` Lazar, Lijo
  2022-02-17  2:53     ` Quan, Evan
  0 siblings, 1 reply; 23+ messages in thread
From: Lazar, Lijo @ 2022-02-11 13:21 UTC (permalink / raw)
  To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, rui.huang



On 2/11/2022 1:22 PM, Evan Quan wrote:
> Instead of having an interface for every reset method, we replace them
> with a new interface which can support all reset methods.
> 
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I4c8a7121dd65c2671085673dd7c13cf7e4286f3d
> ---
>   drivers/gpu/drm/amd/amdgpu/aldebaran.c        |   2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |   4 +-
>   drivers/gpu/drm/amd/amdgpu/cik.c              |   4 +-
>   drivers/gpu/drm/amd/amdgpu/nv.c               |  13 +-
>   drivers/gpu/drm/amd/amdgpu/soc15.c            |  12 +-
>   drivers/gpu/drm/amd/amdgpu/vi.c               |   6 +-
>   .../gpu/drm/amd/include/kgd_pp_interface.h    |   7 +-
>   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           |  89 ++-----------
>   drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  13 +-
>   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  |  86 ++++++++----
>   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 126 +++++++++++-------
>   drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   3 -
>   12 files changed, 180 insertions(+), 185 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> index a545df4efce1..22b787de313a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> @@ -128,7 +128,7 @@ static int aldebaran_mode2_reset(struct amdgpu_device *adev)
>   {
>   	/* disable BM */
>   	pci_clear_master(adev->pdev);
> -	adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
> +	adev->asic_reset_res = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
>   	return adev->asic_reset_res;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 7931132ce6e3..b19bfdf81500 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -4504,9 +4504,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
>   
>           amdgpu_device_cache_pci_state(adev->pdev);
>   
> -        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
> +        if (amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_MODE1)) {
>                   dev_info(adev->dev, "GPU smu mode1 reset\n");
> -                ret = amdgpu_dpm_mode1_reset(adev);
> +                ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE1);
>           } else {
>                   dev_info(adev->dev, "GPU psp mode1 reset\n");
>                   ret = psp_gpu_reset(adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
> index f10ce740a29c..786975716eb9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik.c
> @@ -1380,7 +1380,7 @@ static bool cik_asic_supports_baco(struct amdgpu_device *adev)
>   	switch (adev->asic_type) {
>   	case CHIP_BONAIRE:
>   	case CHIP_HAWAII:
> -		return amdgpu_dpm_is_baco_supported(adev);
> +		return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   	default:
>   		return false;
>   	}
> @@ -1434,7 +1434,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
>   
>   	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
>   		dev_info(adev->dev, "BACO reset\n");
> -		r = amdgpu_dpm_baco_reset(adev);
> +		r = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
>   	} else {
>   		dev_info(adev->dev, "PCI CONFIG reset\n");
>   		r = cik_asic_pci_config_reset(adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
> index 494e17f65fc3..2e590008d3ee 100644
> --- a/drivers/gpu/drm/amd/amdgpu/nv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/nv.c
> @@ -414,7 +414,7 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
>   
>   	amdgpu_device_cache_pci_state(adev->pdev);
>   
> -	ret = amdgpu_dpm_mode2_reset(adev);
> +	ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
>   	if (ret)
>   		dev_err(adev->dev, "GPU mode2 reset failed\n");
>   
> @@ -458,7 +458,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
>   	case IP_VERSION(11, 0, 13):
>   		return AMD_RESET_METHOD_MODE1;
>   	default:
> -		if (amdgpu_dpm_is_baco_supported(adev))
> +		if (amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO))
>   			return AMD_RESET_METHOD_BACO;
>   		else
>   			return AMD_RESET_METHOD_MODE1;
> @@ -476,7 +476,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
>   		break;
>   	case AMD_RESET_METHOD_BACO:
>   		dev_info(adev->dev, "BACO reset\n");
> -		ret = amdgpu_dpm_baco_reset(adev);
> +		ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
>   		break;
>   	case AMD_RESET_METHOD_MODE2:
>   		dev_info(adev->dev, "MODE2 reset\n");
> @@ -641,6 +641,11 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
>   	return 0;
>   }
>   
> +static bool nv_asic_supports_baco(struct amdgpu_device *adev)
> +{
> +	return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
> +}
> +
>   static const struct amdgpu_asic_funcs nv_asic_funcs =
>   {
>   	.read_disabled_bios = &nv_read_disabled_bios,
> @@ -657,7 +662,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
>   	.need_full_reset = &nv_need_full_reset,
>   	.need_reset_on_init = &nv_need_reset_on_init,
>   	.get_pcie_replay_count = &nv_get_pcie_replay_count,
> -	.supports_baco = &amdgpu_dpm_is_baco_supported,
> +	.supports_baco = &nv_asic_supports_baco,
>   	.pre_asic_init = &nv_pre_asic_init,
>   	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
>   	.query_video_codecs = &nv_query_video_codecs,
> diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
> index a216e625c89c..15ee56406bc1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
> +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
> @@ -508,7 +508,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
>   	if (ras && adev->ras_enabled)
>   		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
>   
> -	ret = amdgpu_dpm_baco_reset(adev);
> +	ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
>   	if (ret)
>   		return ret;
>   
> @@ -553,7 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
>   	case IP_VERSION(11, 0, 2):
>   		if (adev->asic_type == CHIP_VEGA20) {
>   			if (adev->psp.sos.fw_version >= 0x80067)
> -				baco_reset = amdgpu_dpm_is_baco_supported(adev);
> +				baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   			/*
>   			 * 1. PMFW version > 0x284300: all cases use baco
>   			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
> @@ -562,7 +562,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
>   			    adev->pm.fw_version <= 0x283400)
>   				baco_reset = false;
>   		} else {
> -			baco_reset = amdgpu_dpm_is_baco_supported(adev);
> +			baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   		}
>   		break;
>   	case IP_VERSION(13, 0, 2):
> @@ -599,7 +599,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
>   		return soc15_asic_baco_reset(adev);
>   	case AMD_RESET_METHOD_MODE2:
>   		dev_info(adev->dev, "MODE2 reset\n");
> -		return amdgpu_dpm_mode2_reset(adev);
> +		return amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_MODE2);
>   	default:
>   		dev_info(adev->dev, "MODE1 reset\n");
>   		return amdgpu_device_mode1_reset(adev);
> @@ -613,10 +613,10 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
>   	case IP_VERSION(11, 0, 2):
>   		if (adev->asic_type == CHIP_VEGA20) {
>   			if (adev->psp.sos.fw_version >= 0x80067)
> -				return amdgpu_dpm_is_baco_supported(adev);
> +				return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   			return false;
>   		} else {
> -			return amdgpu_dpm_is_baco_supported(adev);
> +			return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   		}
>   		break;
>   	default:
> diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
> index 6645ebbd2696..de510de5e62a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vi.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
> @@ -904,7 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
>   	case CHIP_POLARIS11:
>   	case CHIP_POLARIS12:
>   	case CHIP_TOPAZ:
> -		return amdgpu_dpm_is_baco_supported(adev);
> +		return amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   	default:
>   		return false;
>   	}
> @@ -930,7 +930,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
>   	case CHIP_POLARIS11:
>   	case CHIP_POLARIS12:
>   	case CHIP_TOPAZ:
> -		baco_reset = amdgpu_dpm_is_baco_supported(adev);
> +		baco_reset = amdgpu_dpm_is_asic_reset_supported(adev, AMD_RESET_METHOD_BACO);
>   		break;
>   	default:
>   		baco_reset = false;
> @@ -962,7 +962,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
>   
>   	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
>   		dev_info(adev->dev, "BACO reset\n");
> -		r = amdgpu_dpm_baco_reset(adev);
> +		r = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
>   	} else {
>   		dev_info(adev->dev, "PCI CONFIG reset\n");
>   		r = vi_asic_pci_config_reset(adev);
> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> index 892648a4a353..8d9c32e70532 100644
> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> @@ -300,6 +300,7 @@ struct amd_pp_clocks;
>   struct pp_smu_wm_range_sets;
>   struct pp_smu_nv_clock_table;
>   struct dpm_clocks;
> +enum amd_reset_method;
>   
>   struct amd_pm_funcs {
>   /* export for dpm on ci and si */
> @@ -387,12 +388,10 @@ struct amd_pm_funcs {
>   	int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
>   	int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
>   	int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
> -	int (*get_asic_baco_capability)(void *handle, bool *cap);
>   	int (*get_asic_baco_state)(void *handle, int *state);
>   	int (*set_asic_baco_state)(void *handle, int state);
>   	int (*get_ppfeature_status)(void *handle, char *buf);
>   	int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
> -	int (*asic_reset_mode_2)(void *handle);
>   	int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
>   	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
>   	ssize_t (*get_gpu_metrics)(void *handle, void **table);
> @@ -410,6 +409,10 @@ struct amd_pm_funcs {
>   	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
>   	void (*pm_compute_clocks)(void *handle);
>   	bool (*is_smc_alive)(void *handle);
> +	int (*is_asic_reset_supported)(void *handle,
> +				       enum amd_reset_method reset_method);
> +	int (*asic_reset)(void *handle,
> +			  enum amd_reset_method reset_method);
>   };
>   
>   struct metrics_table_header {
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> index f237dd3a3f66..b72945f6a338 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> @@ -196,107 +196,42 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
>   	return ret;
>   }
>   
> -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
> +int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
> +				       enum amd_reset_method reset_method)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	void *pp_handle = adev->powerplay.pp_handle;
> -	bool baco_cap;
> -	int ret = 0;
> +	int reset_supported = false;
>   
>   	if (!amdgpu_dpm_is_smc_alive(adev))
>   		return false;
>   
> -	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
> +	if (!pp_funcs || !pp_funcs->is_asic_reset_supported)
>   		return false;
>   
>   	mutex_lock(&adev->pm.mutex);
> -
> -	ret = pp_funcs->get_asic_baco_capability(pp_handle,
> -						 &baco_cap);
> -
> +	reset_supported = pp_funcs->is_asic_reset_supported(adev->powerplay.pp_handle,
> +							    reset_method);
>   	mutex_unlock(&adev->pm.mutex);
>   
> -	return ret ? false : baco_cap;
> +	return reset_supported;
>   }
>   
> -int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
> +int amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
> +			  enum amd_reset_method reset_method)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	void *pp_handle = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
>   	if (!amdgpu_dpm_is_smc_alive(adev))
>   		return -EOPNOTSUPP;
>   
> -	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
> -		return -ENOENT;
> -
> -	mutex_lock(&adev->pm.mutex);
> -
> -	ret = pp_funcs->asic_reset_mode_2(pp_handle);
> -
> -	mutex_unlock(&adev->pm.mutex);
> -
> -	return ret;
> -}
> -
> -int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
> -{
> -	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	void *pp_handle = adev->powerplay.pp_handle;
> -	int ret = 0;
> -
> -	if (!amdgpu_dpm_is_smc_alive(adev))
> +	if (!pp_funcs || !pp_funcs->asic_reset)
>   		return -EOPNOTSUPP;
>   
> -	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
> -		return -ENOENT;
> -
>   	mutex_lock(&adev->pm.mutex);
> -
> -	/* enter BACO state */
> -	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
> -	if (ret)
> -		goto out;
> -
> -	/* exit BACO state */
> -	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
> -
> -out:
> +	ret = pp_funcs->asic_reset(adev->powerplay.pp_handle,
> +				   reset_method);
>   	mutex_unlock(&adev->pm.mutex);
> -	return ret;
> -}
> -
> -bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
> -{
> -	struct smu_context *smu = adev->powerplay.pp_handle;
> -	bool support_mode1_reset = false;
> -
> -	if (!amdgpu_dpm_is_smc_alive(adev))
> -		return false;
> -
> -	if (is_support_sw_smu(adev)) {
> -		mutex_lock(&adev->pm.mutex);
> -		support_mode1_reset = smu_mode1_reset_is_support(smu);
> -		mutex_unlock(&adev->pm.mutex);
> -	}
> -
> -	return support_mode1_reset;
> -}
> -
> -int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
> -{
> -	struct smu_context *smu = adev->powerplay.pp_handle;
> -	int ret = -EOPNOTSUPP;
> -
> -	if (!amdgpu_dpm_is_smc_alive(adev))
> -		return -EOPNOTSUPP;
> -
> -	if (is_support_sw_smu(adev)) {
> -		mutex_lock(&adev->pm.mutex);
> -		ret = smu_mode1_reset(smu);
> -		mutex_unlock(&adev->pm.mutex);
> -	}
>   
>   	return ret;
>   }
> diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> index 49488aebd350..bda8b8149497 100644
> --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> @@ -374,15 +374,6 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
>   				    enum PP_SMC_POWER_PROFILE type,
>   				    bool en);
>   
> -int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
> -
> -int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
> -
> -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
> -
> -bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
> -int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
> -
>   int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
>   			     enum pp_mp1_state mp1_state);
>   
> @@ -542,4 +533,8 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
>   						  unsigned int *num_states);
>   int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
>   				   struct dpm_clocks *clock_table);
> +int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
> +				       enum amd_reset_method reset_method);
> +int amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
> +			  enum amd_reset_method reset_method);
>   #endif
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index 81ec5464b679..3edc05296e01 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -1177,20 +1177,6 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
>   	return phm_set_active_display_count(hwmgr, count);
>   }
>   
> -static int pp_get_asic_baco_capability(void *handle, bool *cap)
> -{
> -	struct pp_hwmgr *hwmgr = handle;
> -
> -	*cap = false;
> -
> -	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
> -		return 0;
> -
> -	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
> -
> -	return 0;
> -}
> -
>   static int pp_get_asic_baco_state(void *handle, int *state)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
> @@ -1242,18 +1228,6 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
>   	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
>   }
>   
> -static int pp_asic_reset_mode_2(void *handle)
> -{
> -	struct pp_hwmgr *hwmgr = handle;
> -
> -	if (hwmgr->hwmgr_func->asic_reset == NULL) {
> -		pr_info_ratelimited("%s was not implemented.\n", __func__);
> -		return -EINVAL;
> -	}
> -
> -	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
> -}
> -
>   static int pp_smu_i2c_bus_access(void *handle, bool acquire)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
> @@ -1394,6 +1368,62 @@ static bool pp_is_smc_alive(void *handle)
>   	return false;
>   }
>   
> +static int pp_is_asic_reset_supported(void *handle,
> +				       enum amd_reset_method reset_method)
> +{
> +	struct pp_hwmgr *hwmgr = handle;
> +	bool reset_supported = false;
> +
> +	switch (reset_method) {
> +	case AMD_RESET_METHOD_BACO:
> +		if (hwmgr->hwmgr_func->get_asic_baco_capability)
> +			hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr,
> +								    &reset_supported);
> +		break;
> +	case AMD_RESET_METHOD_MODE1:
> +	case AMD_RESET_METHOD_MODE2:
> +	default:
> +		break;
> +	}
> +
> +	return reset_supported;
> +}
> +
> +static int pp_asic_reset(void *handle,
> +			 enum amd_reset_method reset_method)
> +{
> +	struct pp_hwmgr *hwmgr = handle;
> +	int ret = 0;
> +
> +	switch (reset_method) {
> +	case AMD_RESET_METHOD_MODE1:
> +		return -EOPNOTSUPP;
> +	case AMD_RESET_METHOD_MODE2:
> +		if (!hwmgr->hwmgr_func->asic_reset)
> +			return -EOPNOTSUPP;
> +
> +		ret = hwmgr->hwmgr_func->asic_reset(hwmgr,
> +						    SMU_ASIC_RESET_MODE_2);
> +		break;
> +	case AMD_RESET_METHOD_BACO:
> +		if (!hwmgr->hwmgr_func->set_asic_baco_state)
> +			return -EOPNOTSUPP;
> +
> +		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
> +							     BACO_STATE_IN);
> +		if (ret)
> +			return ret;
> +
> +		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
> +							     BACO_STATE_OUT);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
>   static const struct amd_pm_funcs pp_dpm_funcs = {
>   	.load_firmware = pp_dpm_load_fw,
>   	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
> @@ -1446,12 +1476,10 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
>   	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
>   	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
>   	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
> -	.get_asic_baco_capability = pp_get_asic_baco_capability,
>   	.get_asic_baco_state = pp_get_asic_baco_state,
>   	.set_asic_baco_state = pp_set_asic_baco_state,
>   	.get_ppfeature_status = pp_get_ppfeature_status,
>   	.set_ppfeature_status = pp_set_ppfeature_status,
> -	.asic_reset_mode_2 = pp_asic_reset_mode_2,
>   	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
>   	.set_df_cstate = pp_set_df_cstate,
>   	.set_xgmi_pstate = pp_set_xgmi_pstate,
> @@ -1460,4 +1488,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
>   	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
>   	.pm_compute_clocks = pp_pm_compute_clocks,
>   	.is_smc_alive = pp_is_smc_alive,
> +	.is_asic_reset_supported = pp_is_asic_reset_supported,
> +	.asic_reset = pp_asic_reset,
>   };
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 3773e95a18bf..bab5ddc667f9 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -2503,18 +2503,6 @@ static int smu_set_xgmi_pstate(void *handle,
>   	return ret;
>   }
>   
> -static int smu_get_baco_capability(void *handle, bool *cap)
> -{
> -	struct smu_context *smu = handle;
> -
> -	*cap = false;
> -
> -	if (smu->ppt_funcs->baco_is_support)
> -		*cap = smu->ppt_funcs->baco_is_support(smu);
> -
> -	return 0;
> -}
> -
>   static int smu_baco_set_state(void *handle, int state)
>   {
>   	struct smu_context *smu = handle;
> @@ -2537,40 +2525,6 @@ static int smu_baco_set_state(void *handle, int state)
>   	return ret;
>   }
>   
> -bool smu_mode1_reset_is_support(struct smu_context *smu)
> -{
> -	bool ret = false;
> -
> -	if (smu->ppt_funcs->mode1_reset_is_support)
> -		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
> -
> -	return ret;
> -}
> -
> -int smu_mode1_reset(struct smu_context *smu)
> -{
> -	int ret = 0;
> -
> -	if (smu->ppt_funcs->mode1_reset)
> -		ret = smu->ppt_funcs->mode1_reset(smu);
> -
> -	return ret;
> -}
> -
> -static int smu_mode2_reset(void *handle)
> -{
> -	struct smu_context *smu = handle;
> -	int ret = 0;
> -
> -	if (smu->ppt_funcs->mode2_reset)
> -		ret = smu->ppt_funcs->mode2_reset(smu);
> -
> -	if (ret)
> -		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
> -
> -	return ret;
> -}
> -
>   static int smu_get_max_sustainable_clocks_by_dc(void *handle,
>   						struct pp_smu_nv_clock_table *max_clocks)
>   {
> @@ -2705,6 +2659,82 @@ static bool smu_is_smc_alive(void *handle)
>   	return false;
>   }
>   
> +static int smu_is_asic_reset_supported(void *handle,
> +				       enum amd_reset_method reset_method)
> +{
> +	struct smu_context *smu = handle;
> +	struct amdgpu_device *adev = smu->adev;
> +	int reset_supported = false;
> +
> +	switch (reset_method) {
> +	case AMD_RESET_METHOD_MODE1:
> +		if (smu->ppt_funcs->mode1_reset_is_support)
> +			reset_supported = smu->ppt_funcs->mode1_reset_is_support(smu);
> +		break;
> +	case AMD_RESET_METHOD_MODE2:
> +		switch (adev->ip_versions[MP1_HWIP][0]) {
> +		case IP_VERSION(11, 5, 0):
> +		case IP_VERSION(12, 0, 0):
> +		case IP_VERSION(12, 0, 1):
> +		case IP_VERSION(13, 0, 2):
> +		case IP_VERSION(13, 0, 1):
> +		case IP_VERSION(13, 0, 3):
> +			reset_supported = true;
> +			break;

Patch 2 drops mode2_reset_is_support(). What about changing to a single 
is_reset_supported() callback and avoiding the other per-method checks here?

	return smu->ppt_funcs->is_reset_supported(smu, reset_method);
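
i.e. roughly something like the below (untested sketch; it assumes a new
per-ASIC is_reset_supported callback gets added to ppt_funcs, which is
not part of this series):

static int smu_is_asic_reset_supported(void *handle,
				       enum amd_reset_method reset_method)
{
	struct smu_context *smu = handle;

	/* hypothetical unified callback: each ASIC's ppt implementation
	 * reports which reset methods it supports, so the per-method and
	 * IP-version switch above is no longer needed in the wrapper
	 */
	if (!smu->ppt_funcs || !smu->ppt_funcs->is_reset_supported)
		return false;

	return smu->ppt_funcs->is_reset_supported(smu, reset_method);
}

That would keep the reset-method knowledge inside each ASIC's ppt code
instead of in the common wrapper.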

Thanks,
Lijo

> +		default:
> +			break;
> +		}
> +		break;
> +	case AMD_RESET_METHOD_BACO:
> +		if (smu->ppt_funcs->baco_is_support)
> +			reset_supported = smu->ppt_funcs->baco_is_support(smu);
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	return reset_supported;
> +}
> +
> +static int smu_asic_reset(void *handle,
> +			  enum amd_reset_method reset_method)
> +{
> +	struct smu_context *smu = handle;
> +	int ret = 0;
> +
> +	switch (reset_method) {
> +	case AMD_RESET_METHOD_MODE1:
> +		if (!smu->ppt_funcs->mode1_reset)
> +			return -EOPNOTSUPP;
> +
> +		ret = smu->ppt_funcs->mode1_reset(smu);
> +		break;
> +	case AMD_RESET_METHOD_MODE2:
> +		if (!smu->ppt_funcs->mode2_reset)
> +			return -EOPNOTSUPP;
> +
> +		ret = smu->ppt_funcs->mode2_reset(smu);
> +		if (ret)
> +			dev_err(smu->adev->dev, "Mode2 reset failed!\n");
> +		break;
> +	case AMD_RESET_METHOD_BACO:
> +		if (!smu->ppt_funcs->baco_enter ||
> +		    !smu->ppt_funcs->baco_exit)
> +			return -EOPNOTSUPP;
> +
> +		ret = smu->ppt_funcs->baco_enter(smu);
> +		if (ret)
> +			return ret;
> +
> +		ret = smu->ppt_funcs->baco_exit(smu);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
>   static const struct amd_pm_funcs swsmu_pm_funcs = {
>   	/* export for sysfs */
>   	.set_fan_control_mode    = smu_set_fan_control_mode,
> @@ -2744,11 +2774,9 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
>   	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
>   	.set_active_display_count         = smu_set_display_count,
>   	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
> -	.get_asic_baco_capability         = smu_get_baco_capability,
>   	.set_asic_baco_state              = smu_baco_set_state,
>   	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
>   	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
> -	.asic_reset_mode_2                = smu_mode2_reset,
>   	.set_df_cstate                    = smu_set_df_cstate,
>   	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
>   	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
> @@ -2759,6 +2787,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
>   	.get_dpm_clock_table              = smu_get_dpm_clock_table,
>   	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
>   	.is_smc_alive = smu_is_smc_alive,
> +	.is_asic_reset_supported = smu_is_asic_reset_supported,
> +	.asic_reset              = smu_asic_reset,
>   };
>   
>   int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index bced761f3f96..ce9cd0522a40 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -1392,9 +1392,6 @@ int smu_get_power_limit(void *handle,
>   			enum pp_power_limit_level pp_limit_level,
>   			enum pp_power_type pp_power_type);
>   
> -bool smu_mode1_reset_is_support(struct smu_context *smu);
> -int smu_mode1_reset(struct smu_context *smu);
> -
>   extern const struct amd_ip_funcs smu_ip_funcs;
>   
>   bool is_support_sw_smu(struct amdgpu_device *adev);
> 

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-11  7:52 ` [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c Evan Quan
  2022-02-11  8:06   ` Chen, Guchun
@ 2022-02-11 13:39   ` Lazar, Lijo
  2022-02-17  2:35     ` Quan, Evan
  1 sibling, 1 reply; 23+ messages in thread
From: Lazar, Lijo @ 2022-02-11 13:39 UTC (permalink / raw)
  To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, rui.huang



On 2/11/2022 1:22 PM, Evan Quan wrote:
> Instead of checking this in every instance (framework), it is more
> appropriate to move that check into amdgpu_dpm.c. That also keeps the
> code clean and tidy.
> 
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
>   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277 ++++++++++++++++--
>   drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
>   drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
>   .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
>   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
>   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
>   drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
>   9 files changed, 352 insertions(+), 240 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 2c929fa40379..fff0e6a3882e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
>   {
>   	struct amdgpu_device *adev = ctx->adev;
>   	enum amd_dpm_forced_level current_level;
> +	int ret = 0;
>   
>   	if (!ctx)
>   		return -EINVAL;
>   
> -	current_level = amdgpu_dpm_get_performance_level(adev);
> +	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
> +	if (ret)
> +		return ret;
>   
>   	switch (current_level) {
>   	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 9f985bd463be..56144f25b720 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   		unsigned i;
>   		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
>   		struct amd_vce_state *vce_state;
> +		int ret = 0;
>   
>   		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
> -			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
> -			if (vce_state) {
> -				vce_clk_table.entries[i].sclk = vce_state->sclk;
> -				vce_clk_table.entries[i].mclk = vce_state->mclk;
> -				vce_clk_table.entries[i].eclk = vce_state->evclk;
> -				vce_clk_table.num_valid_entries++;
> -			}
> +			ret = amdgpu_dpm_get_vce_clock_state(adev, i, vce_state);
> +			if (ret)
> +				return ret;
> +
> +			vce_clk_table.entries[i].sclk = vce_state->sclk;
> +			vce_clk_table.entries[i].mclk = vce_state->mclk;
> +			vce_clk_table.entries[i].eclk = vce_state->evclk;
> +			vce_clk_table.num_valid_entries++;
>   		}
>   
>   		return copy_to_user(out, &vce_clk_table,
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> index 1d63f1e8884c..b46ae0063047 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> @@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>   	if (!pp_funcs->get_sclk)
>   		return 0;
>   
> @@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>   	if (!pp_funcs->get_mclk)
>   		return 0;
>   
> @@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
>   
> +	if (!adev->pm.dpm_enabled) {
> +		dev_WARN(adev->dev,
> +			 "SMU uninitialized but power %s requested for %u!\n",
> +			 gate ? "gate" : "ungate", block_type);
> +		return -EOPNOTSUPP;
> +	}
> +
>   	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
>   		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
>   				block_type, gate ? "gate" : "ungate");
> @@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (amdgpu_sriov_vf(adev))
>   		return 0;
>   
> @@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
> @@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	void *pp_handle = adev->powerplay.pp_handle;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (pp_funcs && pp_funcs->set_df_cstate) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
> @@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (is_support_sw_smu(adev)) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = smu_allow_xgmi_power_down(smu, en);
> @@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
>   			adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
> @@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
>   			adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
> @@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
>   			adev->powerplay.pp_funcs;
>   	int ret = -EOPNOTSUPP;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +

I2C bus access doesn't need DPM to be enabled.
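In other words, a sketch of the shape this wrapper could keep (illustrative
only, based on the quoted code below) would be:

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	/*
	 * No dpm_enabled gate here: i2c access to the SMU should keep
	 * working even when DPM initialization failed.
	 */
	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle, acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}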

>   	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
> @@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = -EINVAL;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!data || !size)
>   		return -EINVAL;
>   
> @@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
>   {
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +

Please double-check this one as well.

>   	if (is_support_sw_smu(adev)) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
> @@ -500,6 +540,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	mutex_lock(&adev->pm.mutex);
>   	ret = smu_send_hbm_bad_pages_num(smu, size);
>   	mutex_unlock(&adev->pm.mutex);
> @@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
>   {
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (type != PP_SCLK)
>   		return -EINVAL;
>   
> @@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (type != PP_SCLK)
>   		return -EINVAL;
>   
> @@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
>   
>   int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
>   {
> -	struct smu_context *smu = adev->powerplay.pp_handle;
> +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!is_support_sw_smu(adev))
>   		return 0;
>   
>   	mutex_lock(&adev->pm.mutex);
> -	ret = smu_write_watermarks_table(smu);
> +	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
> +							NULL);
>   	mutex_unlock(&adev->pm.mutex);
>   
>   	return ret;
> @@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +

DPM doesn't need to be enabled in this case either.

In general, this patch assumes the DPM interfaces will continue to be used.
There was a discussion about getting rid of dpm and moving to an smu
component based interface; this patch goes in the opposite direction.
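
(For illustration only: that direction would mean callers going through smu
component entry points instead of the dpm wrappers, with the enablement
policy kept inside the component. The function name below is hypothetical and
not existing code; the fields are the ones from the quoted diff:)

/* hypothetical smu-component entry point, not an existing interface */
int smu_comp_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	/* the enablement policy stays inside the smu component itself */
	if (!smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
}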

Thanks,
Lijo

>   	if (!is_support_sw_smu(adev))
>   		return -EOPNOTSUPP;
>   
> @@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!is_support_sw_smu(adev))
>   		return -EOPNOTSUPP;
>   
> @@ -605,6 +664,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
>   {
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>   	if (!is_support_sw_smu(adev))
>   		return 0;
>   
> @@ -619,6 +681,9 @@ uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
>   void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
>   				 enum gfx_change_state state)
>   {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	mutex_lock(&adev->pm.mutex);
>   	if (adev->powerplay.pp_funcs &&
>   	    adev->powerplay.pp_funcs->gfx_state_change_set)
> @@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
>   {
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!is_support_sw_smu(adev))
>   		return -EOPNOTSUPP;
>   
>   	return smu_get_ecc_info(smu, umc_ecc);
>   }
>   
> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> -						     uint32_t idx)
> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> +				   uint32_t idx,
> +				   struct amd_vce_state *vstate)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	struct amd_vce_state *vstate = NULL;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
>   
>   	if (!pp_funcs->get_vce_clock_state)
> -		return NULL;
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
>   	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
>   					       idx);
>   	mutex_unlock(&adev->pm.mutex);
>   
> -	return vstate;
> +	return 0;
>   }
>   
>   void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
> @@ -660,6 +731,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	mutex_lock(&adev->pm.mutex);
>   
>   	if (!pp_funcs->get_current_power_state) {
> @@ -679,6 +753,9 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
>   void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>   				enum amd_pm_state_type state)
>   {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	mutex_lock(&adev->pm.mutex);
>   	adev->pm.dpm.user_state = state;
>   	mutex_unlock(&adev->pm.mutex);
> @@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>   		amdgpu_dpm_compute_clocks(adev);
>   }
>   
> -enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> +				     enum amd_dpm_forced_level *level)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	enum amd_dpm_forced_level level;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
>   	if (pp_funcs->get_performance_level)
> -		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
> +		*level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
>   	else
> -		level = adev->pm.dpm.forced_level;
> +		*level = adev->pm.dpm.forced_level;
>   	mutex_unlock(&adev->pm.mutex);
>   
> -	return level;
> +	return 0;
>   }
>   
>   int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
> @@ -717,13 +797,16 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>   					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
>   					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->force_performance_level)
>   		return 0;
>   
>   	if (adev->pm.dpm.thermal_active)
>   		return -EINVAL;
>   
> -	current_level = amdgpu_dpm_get_performance_level(adev);
> +	amdgpu_dpm_get_performance_level(adev, &current_level);
>   	if (current_level == level)
>   		return 0;
>   
> @@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_pp_num_states)
>   		return -EOPNOTSUPP;
>   
> @@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->dispatch_tasks)
>   		return -EOPNOTSUPP;
>   
> @@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_pp_table)
>   		return 0;
>   
> @@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_fine_grain_clk_vol)
>   		return 0;
>   
> @@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->odn_edit_dpm_table)
>   		return 0;
>   
> @@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->print_clock_levels)
>   		return 0;
>   
> @@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_ppfeature_status)
>   		return 0;
>   
> @@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_ppfeature_status)
>   		return 0;
>   
> @@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->force_clock_level)
>   		return 0;
>   
> @@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
>   	return ret;
>   }
>   
> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
> +			   uint32_t *value)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	int ret = 0;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
>   
>   	if (!pp_funcs->get_sclk_od)
> -		return 0;
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
> -	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
> +	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
>   	mutex_unlock(&adev->pm.mutex);
>   
> -	return ret;
> +	return 0;
>   }
>   
>   int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (is_support_sw_smu(adev))
> -		return 0;
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
>   	if (pp_funcs->set_sclk_od)
> @@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
>   	return 0;
>   }
>   
> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
> +			   uint32_t *value)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	int ret = 0;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
>   
>   	if (!pp_funcs->get_mclk_od)
> -		return 0;
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
> -	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
> +	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
>   	mutex_unlock(&adev->pm.mutex);
>   
> -	return ret;
> +	return 0;
>   }
>   
>   int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (is_support_sw_smu(adev))
> -		return 0;
> +		return -EOPNOTSUPP;
>   
>   	mutex_lock(&adev->pm.mutex);
>   	if (pp_funcs->set_mclk_od)
> @@ -1043,6 +1165,9 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_power_profile_mode)
>   		return -EOPNOTSUPP;
>   
> @@ -1060,6 +1185,9 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_power_profile_mode)
>   		return 0;
>   
> @@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_gpu_metrics)
>   		return 0;
>   
> @@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_fan_control_mode)
>   		return -EOPNOTSUPP;
>   
> @@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_fan_speed_pwm)
>   		return -EOPNOTSUPP;
>   
> @@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_fan_speed_pwm)
>   		return -EOPNOTSUPP;
>   
> @@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_fan_speed_rpm)
>   		return -EOPNOTSUPP;
>   
> @@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_fan_speed_rpm)
>   		return -EOPNOTSUPP;
>   
> @@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_fan_control_mode)
>   		return -EOPNOTSUPP;
>   
> @@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_power_limit)
>   		return -ENODATA;
>   
> @@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_power_limit)
>   		return -EINVAL;
>   
> @@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
>   {
>   	bool cclk_dpm_supported = false;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return false;
> +
>   	if (!is_support_sw_smu(adev))
>   		return false;
>   
> @@ -1247,6 +1405,9 @@ int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *ade
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->debugfs_print_current_performance_level)
>   		return -EOPNOTSUPP;
>   
> @@ -1265,6 +1426,9 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_smu_prv_buf_details)
>   		return -ENOSYS;
>   
> @@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
>   	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return false;
> +
>   	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
>   	    (is_support_sw_smu(adev) && smu->is_apu) ||
>   		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
> @@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_pp_table)
>   		return -EOPNOTSUPP;
>   
> @@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
>   {
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return INT_MAX;
> +
>   	if (!is_support_sw_smu(adev))
>   		return INT_MAX;
>   
> @@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
>   
>   void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
>   {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	if (!is_support_sw_smu(adev))
>   		return;
>   
> @@ -1333,6 +1509,9 @@ int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->display_configuration_change)
>   		return 0;
>   
> @@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_clock_by_type)
>   		return 0;
>   
> @@ -1369,6 +1551,9 @@ int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_display_mode_validation_clocks)
>   		return 0;
>   
> @@ -1387,6 +1572,9 @@ int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_clock_by_type_with_latency)
>   		return 0;
>   
> @@ -1406,6 +1594,9 @@ int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_clock_by_type_with_voltage)
>   		return 0;
>   
> @@ -1424,6 +1615,9 @@ int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_watermarks_for_clocks_ranges)
>   		return -EOPNOTSUPP;
>   
> @@ -1441,6 +1635,9 @@ int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->display_clock_voltage_request)
>   		return -EOPNOTSUPP;
>   
> @@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_current_clocks)
>   		return -EOPNOTSUPP;
>   
> @@ -1473,6 +1673,9 @@ void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	if (!pp_funcs->notify_smu_enable_pwe)
>   		return;
>   
> @@ -1487,6 +1690,9 @@ int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_active_display_count)
>   		return -EOPNOTSUPP;
>   
> @@ -1504,6 +1710,9 @@ int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->set_min_deep_sleep_dcefclk)
>   		return -EOPNOTSUPP;
>   
> @@ -1520,6 +1729,9 @@ void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
>   		return;
>   
> @@ -1534,6 +1746,9 @@ void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>   	if (!pp_funcs->set_hard_min_fclk_by_freq)
>   		return;
>   
> @@ -1549,6 +1764,9 @@ int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->display_disable_memory_clock_switch)
>   		return 0;
>   
> @@ -1566,6 +1784,9 @@ int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
>   		return -EOPNOTSUPP;
>   
> @@ -1584,6 +1805,9 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_uclk_dpm_states)
>   		return -EOPNOTSUPP;
>   
> @@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	int ret = 0;
>   
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs->get_dpm_clock_table)
>   		return -EOPNOTSUPP;
>   
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> index b0243068212b..84aab3bb9bdc 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> @@ -273,11 +273,14 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
>   		return ret;
>   	}
>   
> -	level = amdgpu_dpm_get_performance_level(adev);
> +	ret = amdgpu_dpm_get_performance_level(adev, &level);
>   
>   	pm_runtime_mark_last_busy(ddev->dev);
>   	pm_runtime_put_autosuspend(ddev->dev);
>   
> +	if (ret)
> +		return ret;
> +
>   	return sysfs_emit(buf, "%s\n",
>   			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
>   			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> @@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
>   		return ret;
>   	}
>   
> -	value = amdgpu_dpm_get_sclk_od(adev);
> +	ret = amdgpu_dpm_get_sclk_od(adev, &value);
>   
>   	pm_runtime_mark_last_busy(ddev->dev);
>   	pm_runtime_put_autosuspend(ddev->dev);
>   
> +	if (ret)
> +		return ret;
> +
>   	return sysfs_emit(buf, "%d\n", value);
>   }
>   
> @@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
>   		return ret;
>   	}
>   
> -	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
> +	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
>   
>   	pm_runtime_mark_last_busy(ddev->dev);
>   	pm_runtime_put_autosuspend(ddev->dev);
>   
> +	if (ret)
> +		return ret;
> +
>   	return count;
>   }
>   
> @@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
>   		return ret;
>   	}
>   
> -	value = amdgpu_dpm_get_mclk_od(adev);
> +	ret = amdgpu_dpm_get_mclk_od(adev, &value);
>   
>   	pm_runtime_mark_last_busy(ddev->dev);
>   	pm_runtime_put_autosuspend(ddev->dev);
>   
> +	if (ret)
> +		return ret;
> +
>   	return sysfs_emit(buf, "%d\n", value);
>   }
>   
> @@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
>   		return ret;
>   	}
>   
> -	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
> +	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
>   
>   	pm_runtime_mark_last_busy(ddev->dev);
>   	pm_runtime_put_autosuspend(ddev->dev);
>   
> +	if (ret)
> +		return ret;
> +
>   	return count;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> index ddfa55b59d02..49488aebd350 100644
> --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> @@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
>   				 enum gfx_change_state state);
>   int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
>   			    void *umc_ecc);
> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> -						     uint32_t idx);
> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> +				   uint32_t idx,
> +				   struct amd_vce_state *vstate);
>   void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
>   void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>   				enum amd_pm_state_type state);
> -enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> +				     enum amd_dpm_forced_level *level);
>   int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>   				       enum amd_dpm_forced_level level);
>   int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
> @@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
>   int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
>   				 enum pp_clock_type type,
>   				 uint32_t mask);
> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t *value);
>   int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t *value);
>   int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
>   int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
>   				      char *buf);
> diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> index 9613c6181c17..59550617cf54 100644
> --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> @@ -959,10 +959,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
>   	int ret;
>   	bool equal = false;
>   
> -	/* if dpm init failed */
> -	if (!adev->pm.dpm_enabled)
> -		return 0;
> -
>   	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
>   		/* add other state override checks here */
>   		if ((!adev->pm.dpm.thermal_active) &&
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index 991ac4adb263..bba923cfe08c 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
> @@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (level == hwmgr->dpm_level)
> @@ -353,7 +353,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	return hwmgr->dpm_level;
> @@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return 0;
>   
>   	if (hwmgr->hwmgr_func->get_sclk == NULL) {
> @@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return 0;
>   
>   	if (hwmgr->hwmgr_func->get_mclk == NULL) {
> @@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return;
>   
>   	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
> @@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return;
>   
>   	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
> @@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	return hwmgr_handle_task(hwmgr, task_id, user_state);
> @@ -432,7 +432,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
>   	struct pp_power_state *state;
>   	enum amd_pm_state_type pm_type;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->current_ps)
> +	if (!hwmgr || !hwmgr->current_ps)
>   		return -EINVAL;
>   
>   	state = hwmgr->current_ps;
> @@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
> @@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
> @@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
> @@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
> @@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
> @@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
> @@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
>   
>   	memset(data, 0, sizeof(*data));
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->ps)
> +	if (!hwmgr || !hwmgr->ps)
>   		return -EINVAL;
>   
>   	data->nums = hwmgr->num_ps;
> @@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!hwmgr->soft_pp_table)
> +	if (!hwmgr || !hwmgr->soft_pp_table)
>   		return -EINVAL;
>   
>   	*table = (char *)hwmgr->soft_pp_table;
> @@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
>   	struct pp_hwmgr *hwmgr = handle;
>   	int ret = -ENOMEM;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (!hwmgr->hardcode_pp_table) {
> @@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
> @@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
> @@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
> @@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
> @@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
> @@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
> @@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !value)
> +	if (!hwmgr || !value)
>   		return -EINVAL;
>   
>   	switch (idx) {
> @@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return NULL;
>   
>   	if (idx < hwmgr->num_vce_state_tables)
> @@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
> +	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
>   		return -EOPNOTSUPP;
>   	if (!buf)
>   		return -EINVAL;
> @@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
> +	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
>   		return -EOPNOTSUPP;
>   
>   	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> @@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
> @@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
> @@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void *handle,
>   	long workload;
>   	uint32_t index;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
> @@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
>   	struct pp_hwmgr *hwmgr = handle;
>   	uint32_t max_power_limit;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
> @@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
>   	struct pp_hwmgr *hwmgr = handle;
>   	int ret = 0;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!limit)
> +	if (!hwmgr || !limit)
>   		return -EINVAL;
>   
>   	if (power_type != PP_PWR_TYPE_SUSTAINED)
> @@ -965,7 +965,7 @@ static int pp_display_configuration_change(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	phm_store_dal_configuration_data(hwmgr, display_config);
> @@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!output)
> +	if (!hwmgr || !output)
>   		return -EINVAL;
>   
>   	return phm_get_dal_power_level(hwmgr, output);
> @@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
>   	struct pp_hwmgr *hwmgr = handle;
>   	int ret = 0;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	phm_get_dal_power_level(hwmgr, &simple_clocks);
> @@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (clocks == NULL)
> @@ -1050,7 +1050,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>   		return -EINVAL;
>   
>   	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> @@ -1062,7 +1062,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>   		return -EINVAL;
>   
>   	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> @@ -1073,7 +1073,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !clock_ranges)
> +	if (!hwmgr || !clock_ranges)
>   		return -EINVAL;
>   
>   	return phm_set_watermarks_for_clocks_ranges(hwmgr,
> @@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void *handle,
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clock)
> +	if (!hwmgr || !clock)
>   		return -EINVAL;
>   
>   	return phm_display_clock_voltage_request(hwmgr, clock);
> @@ -1097,7 +1097,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
>   	struct pp_hwmgr *hwmgr = handle;
>   	int ret = 0;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>   		return -EINVAL;
>   
>   	clocks->level = PP_DAL_POWERLEVEL_7;
> @@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
> @@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return 0;
>   
>   	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
> @@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return;
>   
>   	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
> @@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
> @@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
> -	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> +	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
> @@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
> @@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
> @@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
> @@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	return phm_set_active_display_count(hwmgr, count);
> @@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !buf)
> +	if (!hwmgr || !buf)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
> @@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
> @@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
> @@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_df_cstate)
> +	if (!hwmgr->hwmgr_func->set_df_cstate)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
> @@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->set_xgmi_pstate)
> +	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
> @@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled || !hwmgr->hwmgr_func->get_gpu_metrics)
> +	if (!hwmgr->hwmgr_func->get_gpu_metrics)
>   		return -EOPNOTSUPP;
>   
>   	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> @@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 96a3388c2cb7..97c57a6cf314 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void *handle,
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	return smu_get_pp_feature_mask(smu, buf);
>   }
>   
> @@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void *handle,
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	return smu_set_pp_feature_mask(smu, new_mask);
>   }
>   
> @@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled) {
> -		dev_WARN(smu->adev->dev,
> -			 "SMU uninitialized but power %s requested for %u!\n",
> -			 gate ? "gate" : "ungate", block_type);
> -		return -EOPNOTSUPP;
> -	}
> -
>   	switch (block_type) {
>   	/*
>   	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
> @@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
>   	if (!smu->adev->in_suspend)
>   		return;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return;
> -
>   	/* Enable restore flag */
>   	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
>   
> @@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
>   	struct smu_context *smu = handle;
>   	struct smu_table_context *smu_table = &smu->smu_table;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
>   		return -EINVAL;
>   
> @@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
>   	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (header->usStructureSize != size) {
>   		dev_err(smu->adev->dev, "pp table size not matched !\n");
>   		return -EIO;
> @@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void *handle,
>   	int index = 0;
>   	int num_of_active_display = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!display_config)
>   		return -EINVAL;
>   
> @@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context *smu,
>   {
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	switch (task_id) {
>   	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
>   		ret = smu_pre_display_config_changed(smu);
> @@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void *handle,
>   	long workload;
>   	uint32_t index;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
>   		return -EINVAL;
>   
> @@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
>   	struct smu_context *smu = handle;
>   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>   		return -EINVAL;
>   
> @@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void *handle,
>   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>   		return -EINVAL;
>   
> @@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle, uint32_t count)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	return smu_init_display_count(smu, count);
>   }
>   
> @@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
>   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
>   		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
>   		return -EINVAL;
> @@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
>   		return 0;
>   
> @@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
>   {
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
>   		return 0;
>   
> @@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
>   	return ret;
>   }
>   
> -int smu_write_watermarks_table(struct smu_context *smu)
> -{
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
> -	return smu_set_watermarks_table(smu, NULL);
> -}
> -
>   static int smu_set_watermarks_for_clock_ranges(void *handle,
>   					       struct pp_smu_wm_range_sets *clock_ranges)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->disable_watermark)
>   		return 0;
>   
> @@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
>   {
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	/* controlled by firmware */
>   	if (smu->dc_controlled_by_gpio)
>   		return 0;
> @@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->set_fan_speed_rpm)
>   		return -EOPNOTSUPP;
>   
> @@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
>   	uint32_t limit_type;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	switch(pp_power_type) {
>   	case PP_PWR_TYPE_SUSTAINED:
>   		limit_type = SMU_DEFAULT_PPT_LIMIT;
> @@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
>   	uint32_t limit_type = limit >> 24;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	limit &= (1<<24)-1;
>   	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
>   		if (smu->ppt_funcs->set_power_limit)
> @@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
>   {
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->print_clk_levels)
>   		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
>   
> @@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->od_edit_dpm_table) {
>   		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
>   	}
> @@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
>   	int ret = 0;
>   	uint32_t *size, size_val;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!data || !size_arg)
>   		return -EINVAL;
>   
> @@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled ||
> -	    !smu->ppt_funcs->get_power_profile_mode)
> +	if (!smu->ppt_funcs->get_power_profile_mode)
>   		return -EOPNOTSUPP;
>   	if (!buf)
>   		return -EINVAL;
> @@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void *handle,
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled ||
> -	    !smu->ppt_funcs->set_power_profile_mode)
> +	if (!smu->ppt_funcs->set_power_profile_mode)
>   		return -EOPNOTSUPP;
>   
>   	return smu_bump_power_profile_mode(smu, param, param_size);
> @@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->get_fan_control_mode)
>   		return -EOPNOTSUPP;
>   
> @@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->set_fan_control_mode)
>   		return -EOPNOTSUPP;
>   
> @@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->get_fan_speed_pwm)
>   		return -EOPNOTSUPP;
>   
> @@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->set_fan_speed_pwm)
>   		return -EOPNOTSUPP;
>   
> @@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->get_fan_speed_rpm)
>   		return -EOPNOTSUPP;
>   
> @@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	return smu_set_min_dcef_deep_sleep(smu, clk);
>   }
>   
> @@ -2556,9 +2455,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
>   	enum smu_clk_type clk_type;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
>   		switch (type) {
>   		case amd_pp_sys_clock:
> @@ -2590,9 +2486,6 @@ static int smu_display_clock_voltage_request(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->display_clock_voltage_request)
>   		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
>   
> @@ -2606,9 +2499,6 @@ static int smu_display_disable_memory_clock_switch(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = -EINVAL;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->display_disable_memory_clock_switch)
>   		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
>   
> @@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->set_xgmi_pstate)
>   		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
>   
> @@ -2722,9 +2609,6 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
>   		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
>   
> @@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->get_uclk_dpm_states)
>   		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
>   
> @@ -2752,9 +2633,6 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
>   	struct smu_context *smu = handle;
>   	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->get_current_power_state)
>   		pm_state = smu->ppt_funcs->get_current_power_state(smu);
>   
> @@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->get_dpm_clock_table)
>   		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
>   
> @@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
>   {
>   	struct smu_context *smu = handle;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (!smu->ppt_funcs->get_gpu_metrics)
>   		return -EOPNOTSUPP;
>   
> @@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void *handle)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->enable_mgpu_fan_boost)
>   		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
>   
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 39d169440d15..bced761f3f96 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
>   
>   bool is_support_sw_smu(struct amdgpu_device *adev);
>   bool is_support_cclk_dpm(struct amdgpu_device *adev);
> -int smu_write_watermarks_table(struct smu_context *smu);
>   
>   int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
>   			   uint32_t *min, uint32_t *max);
> 

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs
  2022-02-11  7:52 ` [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs Evan Quan
@ 2022-02-14  4:04   ` Lazar, Lijo
  2022-02-17  2:48     ` Quan, Evan
  0 siblings, 1 reply; 23+ messages in thread
From: Lazar, Lijo @ 2022-02-14  4:04 UTC (permalink / raw)
  To: Evan Quan, amd-gfx; +Cc: Alexander.Deucher, rui.huang



On 2/11/2022 1:22 PM, Evan Quan wrote:
> Those gpu reset APIs can be granted when:
>    - The system is up and dpm features are enabled.
>    - The system is resuming and dpm features are not yet enabled.
>      In that scenario, the PMFW is already alive and can support
>      those gpu reset functionalities.
> 
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I8c2f07138921eb53a2bd7fb94f9b3622af0eacf8
> ---
>   .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
>   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 34 +++++++++++++++
>   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 42 +++++++++++++++----
>   .../drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c   |  1 +
>   .../drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c   | 17 ++++++++
>   drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h  |  1 +
>   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 32 +++++++-------
>   7 files changed, 101 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> index a4c267f15959..892648a4a353 100644
> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> @@ -409,6 +409,7 @@ struct amd_pm_funcs {
>   				   struct dpm_clocks *clock_table);
>   	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
>   	void (*pm_compute_clocks)(void *handle);
> +	bool (*is_smc_alive)(void *handle);
>   };
>   
>   struct metrics_table_header {
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> index b46ae0063047..5f1d3342f87b 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> @@ -120,12 +120,25 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
>   	return ret;
>   }
>   
> +static bool amdgpu_dpm_is_smc_alive(struct amdgpu_device *adev)
> +{
> +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> +
> +	if (!pp_funcs || !pp_funcs->is_smc_alive)
> +		return false;
> +
> +	return pp_funcs->is_smc_alive;
> +}
> +
>   int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
>   {
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   	void *pp_handle = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>   		return -ENOENT;
>   
> @@ -145,6 +158,9 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
>   	void *pp_handle = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>   		return -ENOENT;
>   
> @@ -164,6 +180,9 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
>   	int ret = 0;
>   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (pp_funcs && pp_funcs->set_mp1_state) {
>   		mutex_lock(&adev->pm.mutex);
>   
> @@ -184,6 +203,9 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
>   	bool baco_cap;
>   	int ret = 0;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return false;
> +
>   	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
>   		return false;
>   
> @@ -203,6 +225,9 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
>   	void *pp_handle = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
>   		return -ENOENT;
>   
> @@ -221,6 +246,9 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
>   	void *pp_handle = adev->powerplay.pp_handle;
>   	int ret = 0;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>   		return -ENOENT;
>   
> @@ -244,6 +272,9 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	bool support_mode1_reset = false;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return false;
> +
>   	if (is_support_sw_smu(adev)) {
>   		mutex_lock(&adev->pm.mutex);
>   		support_mode1_reset = smu_mode1_reset_is_support(smu);
> @@ -258,6 +289,9 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
>   	struct smu_context *smu = adev->powerplay.pp_handle;
>   	int ret = -EOPNOTSUPP;
>   
> +	if (!amdgpu_dpm_is_smc_alive(adev))
> +		return -EOPNOTSUPP;
> +
>   	if (is_support_sw_smu(adev)) {
>   		mutex_lock(&adev->pm.mutex);
>   		ret = smu_mode1_reset(smu);
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index bba923cfe08c..4c709f7bcd51 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -844,9 +844,6 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!hwmgr->pm_en)
> -		return 0;
> -
>   	if (hwmgr->hwmgr_func->set_mp1_state)
>   		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
>   
> @@ -1305,8 +1302,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
> -		!hwmgr->hwmgr_func->get_asic_baco_capability)
> +	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
> @@ -1321,7 +1317,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
> +	if (!hwmgr->hwmgr_func->get_asic_baco_state)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
> @@ -1336,8 +1332,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
>   	if (!hwmgr)
>   		return -EINVAL;
>   
> -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
> -		!hwmgr->hwmgr_func->set_asic_baco_state)
> +	if (!hwmgr->hwmgr_func->set_asic_baco_state)
>   		return 0;
>   
>   	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
> @@ -1379,7 +1374,7 @@ static int pp_asic_reset_mode_2(void *handle)
>   {
>   	struct pp_hwmgr *hwmgr = handle;
>   
> -	if (!hwmgr || !hwmgr->pm_en)
> +	if (!hwmgr)
>   		return -EINVAL;
>   
>   	if (hwmgr->hwmgr_func->asic_reset == NULL) {
> @@ -1517,6 +1512,34 @@ static void pp_pm_compute_clocks(void *handle)
>   			      NULL);
>   }
>   
> +/* MP Apertures */
> +#define MP1_Public					0x03b00000
> +#define smnMP1_FIRMWARE_FLAGS				0x3010028
> +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L
> +
> +static bool pp_is_smc_alive(void *handle)
> +{
> +	struct pp_hwmgr *hwmgr = handle;
> +	struct amdgpu_device *adev = hwmgr->adev;
> +	uint32_t mp1_fw_flags;
> +
> +	/*
> +	 * If some ASIC(e.g. smu7/smu8) needs special handling for
> +	 * checking smc alive, it should have its own implementation
> +	 * for ->is_smc_alive.
> +	 */
> +	if (hwmgr->hwmgr_func->is_smc_alive)
> +		return hwmgr->hwmgr_func->is_smc_alive(hwmgr);
> +
> +	mp1_fw_flags = RREG32_PCIE(MP1_Public |
> +				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
> +

The flags check doesn't tell whether the PMFW is hung or not; it is only
a minimal flag that gets set after PMFW boot. Calling the API already
implies this condition: the driver always checks it on boot and aborts
smu init if it is not set.

So the better approach is to go ahead and send the message without any
pre-check; its return code will tell whether the PMFW is really working
or not.

In short, this API is not needed.

Thanks,
Lijo
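
A minimal sketch of the alternative suggested above, assuming the reset
path already has a message to send: drop the firmware-flags pre-check and
let the message's own return code report whether the PMFW is responsive.
smu_cmn_send_smc_msg() and SMU_MSG_Mode1Reset below are stand-ins for
whatever message the caller actually issues.

/*
 * Sketch only: no MP1_FIRMWARE_FLAGS read up front. If the PMFW is hung,
 * the message itself times out and that error is propagated to the caller.
 */
static int smu_try_mode1_reset(struct smu_context *smu)
{
	int ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			"PMFW did not acknowledge mode1 reset (%d)\n", ret);

	return ret;
}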

> +	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
> +		return true;
> +
> +	return false;
> +}
> +
>   static const struct amd_pm_funcs pp_dpm_funcs = {
>   	.load_firmware = pp_dpm_load_fw,
>   	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
> @@ -1582,4 +1605,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
>   	.gfx_state_change_set = pp_gfx_state_change_set,
>   	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
>   	.pm_compute_clocks = pp_pm_compute_clocks,
> +	.is_smc_alive = pp_is_smc_alive,
>   };
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> index a1e11037831a..118039b96524 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> @@ -5735,6 +5735,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
>   	.get_asic_baco_state = smu7_baco_get_state,
>   	.set_asic_baco_state = smu7_baco_set_state,
>   	.power_off_asic = smu7_power_off_asic,
> +	.is_smc_alive = smu7_is_smc_ram_running,
>   };
>   
>   uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> index b50fd4a4a3d1..fc4d58329f6d 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> @@ -2015,6 +2015,22 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
>   	}
>   }
>   
> +#define ixMP1_FIRMWARE_FLAGS					0x3008210
> +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK		0x00000001L
> +
> +static bool smu8_is_smc_running(struct pp_hwmgr *hwmgr)
> +{
> +	struct amdgpu_device *adev = hwmgr->adev;
> +	uint32_t mp1_fw_flags;
> +
> +	mp1_fw_flags = RREG32_SMC(ixMP1_FIRMWARE_FLAGS);
> +
> +	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
> +		return true;
> +
> +	return false;
> +}
> +
>   static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
>   	.backend_init = smu8_hwmgr_backend_init,
>   	.backend_fini = smu8_hwmgr_backend_fini,
> @@ -2047,6 +2063,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
>   	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
>   	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
>   	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
> +	.is_smc_alive = smu8_is_smc_running,
>   };
>   
>   int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> index 4f7f2f455301..790fc387752c 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> @@ -364,6 +364,7 @@ struct pp_hwmgr_func {
>   					bool disable);
>   	ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
>   	int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state);
> +	bool (*is_smc_alive)(struct pp_hwmgr *hwmgr);
>   };
>   
>   struct pp_table_func {
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 8b8feaf7aa0e..27a453fb4db7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -1845,9 +1845,6 @@ static int smu_set_mp1_state(void *handle,
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->pm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs &&
>   	    smu->ppt_funcs->set_mp1_state)
>   		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
> @@ -2513,9 +2510,6 @@ static int smu_get_baco_capability(void *handle, bool *cap)
>   
>   	*cap = false;
>   
> -	if (!smu->pm_enabled)
> -		return 0;
> -
>   	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
>   		*cap = smu->ppt_funcs->baco_is_support(smu);
>   
> @@ -2527,9 +2521,6 @@ static int smu_baco_set_state(void *handle, int state)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->pm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (state == 0) {
>   		if (smu->ppt_funcs->baco_exit)
>   			ret = smu->ppt_funcs->baco_exit(smu);
> @@ -2551,9 +2542,6 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
>   {
>   	bool ret = false;
>   
> -	if (!smu->pm_enabled)
> -		return false;
> -
>   	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
>   		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
>   
> @@ -2564,9 +2552,6 @@ int smu_mode1_reset(struct smu_context *smu)
>   {
>   	int ret = 0;
>   
> -	if (!smu->pm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->mode1_reset)
>   		ret = smu->ppt_funcs->mode1_reset(smu);
>   
> @@ -2578,9 +2563,6 @@ static int smu_mode2_reset(void *handle)
>   	struct smu_context *smu = handle;
>   	int ret = 0;
>   
> -	if (!smu->pm_enabled)
> -		return -EOPNOTSUPP;
> -
>   	if (smu->ppt_funcs->mode2_reset)
>   		ret = smu->ppt_funcs->mode2_reset(smu);
>   
> @@ -2712,6 +2694,19 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
>   	return 0;
>   }
>   
> +static bool smu_is_smc_alive(void *handle)
> +{
> +	struct smu_context *smu = handle;
> +
> +	if (!smu->ppt_funcs->check_fw_status)
> +		return false;
> +
> +	if (!smu->ppt_funcs->check_fw_status(smu))
> +		return true;
> +
> +	return false;
> +}
> +
>   static const struct amd_pm_funcs swsmu_pm_funcs = {
>   	/* export for sysfs */
>   	.set_fan_control_mode    = smu_set_fan_control_mode,
> @@ -2765,6 +2760,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
>   	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
>   	.get_dpm_clock_table              = smu_get_dpm_clock_table,
>   	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
> +	.is_smc_alive = smu_is_smc_alive,
>   };
>   
>   int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
> 

^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-11  8:06   ` Chen, Guchun
@ 2022-02-17  1:53     ` Quan, Evan
  0 siblings, 0 replies; 23+ messages in thread
From: Quan, Evan @ 2022-02-17  1:53 UTC (permalink / raw)
  To: Chen, Guchun, amd-gfx; +Cc: Deucher, Alexander, Lazar, Lijo, Huang, Ray

[Public]



> -----Original Message-----
> From: Chen, Guchun <Guchun.Chen@amd.com>
> Sent: Friday, February 11, 2022 4:07 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>; Quan, Evan <Evan.Quan@amd.com>; Huang, Ray
> <Ray.Huang@amd.com>
> Subject: RE: [PATCH 05/12] drm/amd/pm: move the check for dpm
> enablement to amdgpu_dpm.c
> 
> [Public]
> 
> mutex_lock(&adev->pm.mutex);
> -	ret = smu_write_watermarks_table(smu);
> +	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
> +							NULL);
>  	mutex_unlock(&adev->pm.mutex);
> 
> I guess we should separate this from this patch, and send another patch to
> address it.
[Quan, Evan] OK.

BR
Evan
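
For reference, a rough sketch of what that separated change would amount
to, based on the hunk quoted at the top of this reply: the wrapper calls
the generic set_watermarks_for_clock_ranges hook directly instead of the
swsmu-only smu_write_watermarks_table() helper.

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	/* Passing NULL mirrors what smu_write_watermarks_table() did internally. */
	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
							NULL);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}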
> 
> Regards,
> Guchun
> 
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Evan
> Quan
> Sent: Friday, February 11, 2022 3:52 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>; Lazar, Lijo
> <Lijo.Lazar@amd.com>; Quan, Evan <Evan.Quan@amd.com>;
> rui.huang@amd.com
> Subject: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement
> to amdgpu_dpm.c
> 
> Instead of checking this in every instance (framework), it is more proper to
> move that check into amdgpu_dpm.c. That also keeps the code clean and tidy.
> 
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
>  drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277 ++++++++++++++++-
> -
>  drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
>  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
>  .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
>  .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
>  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
>  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
>  9 files changed, 352 insertions(+), 240 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 2c929fa40379..fff0e6a3882e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct
> amdgpu_ctx *ctx,
>  {
>  	struct amdgpu_device *adev = ctx->adev;
>  	enum amd_dpm_forced_level current_level;
> +	int ret = 0;
> 
>  	if (!ctx)
>  		return -EINVAL;
> 
> -	current_level = amdgpu_dpm_get_performance_level(adev);
> +	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
> +	if (ret)
> +		return ret;
> 
>  	switch (current_level) {
>  	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 9f985bd463be..56144f25b720 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev,
> void *data, struct drm_file *filp)
>  		unsigned i;
>  		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
>  		struct amd_vce_state *vce_state;
> +		int ret = 0;
> 
>  		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
> -			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
> -			if (vce_state) {
> -				vce_clk_table.entries[i].sclk = vce_state->sclk;
> -				vce_clk_table.entries[i].mclk = vce_state->mclk;
> -				vce_clk_table.entries[i].eclk = vce_state->evclk;
> -				vce_clk_table.num_valid_entries++;
> -			}
> +			ret = amdgpu_dpm_get_vce_clock_state(adev, i, vce_state);
> +			if (ret)
> +				return ret;
> +
> +			vce_clk_table.entries[i].sclk = vce_state->sclk;
> +			vce_clk_table.entries[i].mclk = vce_state->mclk;
> +			vce_clk_table.entries[i].eclk = vce_state->evclk;
> +			vce_clk_table.num_valid_entries++;
>  		}
> 
>  		return copy_to_user(out, &vce_clk_table,
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> index 1d63f1e8884c..b46ae0063047 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> @@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device
> *adev, bool low)
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>  	if (!pp_funcs->get_sclk)
>  		return 0;
> 
> @@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device
> *adev, bool low)
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>  	if (!pp_funcs->get_mclk)
>  		return 0;
> 
> @@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct
> amdgpu_device *adev, uint32_t block
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF :
> POWER_STATE_ON;
> 
> +	if (!adev->pm.dpm_enabled) {
> +		dev_WARN(adev->dev,
> +			 "SMU uninitialized but power %s requested for %u!\n",
> +			 gate ? "gate" : "ungate", block_type);
> +		return -EOPNOTSUPP;
> +	}
> +
>  	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
>  		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
>  				block_type, gate ? "gate" : "ungate");
> @@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (amdgpu_sriov_vf(adev))
>  		return 0;
> 
> @@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
>  		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
> >powerplay.pp_handle,
> @@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	void *pp_handle = adev->powerplay.pp_handle;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (pp_funcs && pp_funcs->set_df_cstate) {
>  		mutex_lock(&adev->pm.mutex);
>  		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
> @@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct
> amdgpu_device *adev, bool en)
>  	struct smu_context *smu = adev->powerplay.pp_handle;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (is_support_sw_smu(adev)) {
>  		mutex_lock(&adev->pm.mutex);
>  		ret = smu_allow_xgmi_power_down(smu, en);
> @@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct
> amdgpu_device *adev)
>  			adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
>  		mutex_lock(&adev->pm.mutex);
>  		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
> @@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct
> amdgpu_device *adev,
>  			adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
>  		mutex_lock(&adev->pm.mutex);
>  		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
> @@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct
> amdgpu_device *adev,
>  			adev->powerplay.pp_funcs;
>  	int ret = -EOPNOTSUPP;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
>  		mutex_lock(&adev->pm.mutex);
>  		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
> @@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct
> amdgpu_device *adev, enum amd_pp_sensors senso
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = -EINVAL;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!data || !size)
>  		return -EINVAL;
> 
> @@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct
> amdgpu_device *adev, bool enable)
>  {
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (is_support_sw_smu(adev)) {
>  		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
> >powerplay.pp_handle,
> @@ -500,6 +540,9 @@ int
> amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev,
> uint32_t size)
>  	struct smu_context *smu = adev->powerplay.pp_handle;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	mutex_lock(&adev->pm.mutex);
>  	ret = smu_send_hbm_bad_pages_num(smu, size);
>  	mutex_unlock(&adev->pm.mutex);
> @@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct
> amdgpu_device *adev,
>  {
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (type != PP_SCLK)
>  		return -EINVAL;
> 
> @@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct
> amdgpu_device *adev,
>  	struct smu_context *smu = adev->powerplay.pp_handle;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (type != PP_SCLK)
>  		return -EINVAL;
> 
> @@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct
> amdgpu_device *adev,
> 
>  int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
>  {
> -	struct smu_context *smu = adev->powerplay.pp_handle;
> +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!is_support_sw_smu(adev))
>  		return 0;
> 
>  	mutex_lock(&adev->pm.mutex);
> -	ret = smu_write_watermarks_table(smu);
> +	ret = pp_funcs->set_watermarks_for_clock_ranges(adev->powerplay.pp_handle,
> +							NULL);
>  	mutex_unlock(&adev->pm.mutex);
> 
>  	return ret;
> @@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct
> amdgpu_device *adev,
>  	struct smu_context *smu = adev->powerplay.pp_handle;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!is_support_sw_smu(adev))
>  		return -EOPNOTSUPP;
> 
> @@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct
> amdgpu_device *adev, uint32_t *value)
>  	struct smu_context *smu = adev->powerplay.pp_handle;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!is_support_sw_smu(adev))
>  		return -EOPNOTSUPP;
> 
> @@ -605,6 +664,9 @@ uint64_t
> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
> *adev)
>  {
>  	struct smu_context *smu = adev->powerplay.pp_handle;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return 0;
> +
>  	if (!is_support_sw_smu(adev))
>  		return 0;
> 
> @@ -619,6 +681,9 @@ uint64_t
> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
> *adev)
>  void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
>  				 enum gfx_change_state state)
>  {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	mutex_lock(&adev->pm.mutex);
>  	if (adev->powerplay.pp_funcs &&
>  	    adev->powerplay.pp_funcs->gfx_state_change_set)
> @@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct
> amdgpu_device *adev,
>  {
>  	struct smu_context *smu = adev->powerplay.pp_handle;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!is_support_sw_smu(adev))
>  		return -EOPNOTSUPP;
> 
>  	return smu_get_ecc_info(smu, umc_ecc);
>  }
> 
> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
> amdgpu_device *adev,
> -						     uint32_t idx)
> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> +				   uint32_t idx,
> +				   struct amd_vce_state *vstate)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	struct amd_vce_state *vstate = NULL;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> 
>  	if (!pp_funcs->get_vce_clock_state)
> -		return NULL;
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
>  	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
>  					       idx);
>  	mutex_unlock(&adev->pm.mutex);
> 
> -	return vstate;
> +	return 0;
>  }
> 
>  void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
> @@ -660,6 +731,9 @@ void amdgpu_dpm_get_current_power_state(struct
> amdgpu_device *adev,
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	mutex_lock(&adev->pm.mutex);
> 
>  	if (!pp_funcs->get_current_power_state) {
> @@ -679,6 +753,9 @@ void amdgpu_dpm_get_current_power_state(struct
> amdgpu_device *adev,
>  void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>  				enum amd_pm_state_type state)
>  {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	mutex_lock(&adev->pm.mutex);
>  	adev->pm.dpm.user_state = state;
>  	mutex_unlock(&adev->pm.mutex);
> @@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct
> amdgpu_device *adev,
>  		amdgpu_dpm_compute_clocks(adev);
>  }
> 
> -enum amd_dpm_forced_level
> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> +				     enum amd_dpm_forced_level *level)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	enum amd_dpm_forced_level level;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
>  	if (pp_funcs->get_performance_level)
> -		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
> +		*level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
>  	else
> -		level = adev->pm.dpm.forced_level;
> +		*level = adev->pm.dpm.forced_level;
>  	mutex_unlock(&adev->pm.mutex);
> 
> -	return level;
> +	return 0;
>  }
> 
>  int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
> @@ -717,13 +797,16 @@ int amdgpu_dpm_force_performance_level(struct
> amdgpu_device *adev,
> 
> 	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
> 
> 	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->force_performance_level)
>  		return 0;
> 
>  	if (adev->pm.dpm.thermal_active)
>  		return -EINVAL;
> 
> -	current_level = amdgpu_dpm_get_performance_level(adev);
> +	amdgpu_dpm_get_performance_level(adev, &current_level);
>  	if (current_level == level)
>  		return 0;
> 
> @@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_pp_num_states)
>  		return -EOPNOTSUPP;
> 
> @@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->dispatch_tasks)
>  		return -EOPNOTSUPP;
> 
> @@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct
> amdgpu_device *adev, char **table)
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_pp_table)
>  		return 0;
> 
> @@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_fine_grain_clk_vol)
>  		return 0;
> 
> @@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->odn_edit_dpm_table)
>  		return 0;
> 
> @@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->print_clock_levels)
>  		return 0;
> 
> @@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_ppfeature_status)
>  		return 0;
> 
> @@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
> amdgpu_device *adev, char *buf)
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_ppfeature_status)
>  		return 0;
> 
> @@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->force_clock_level)
>  		return 0;
> 
> @@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct
> amdgpu_device *adev,
>  	return ret;
>  }
> 
> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
> +			   uint32_t *value)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	int ret = 0;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> 
>  	if (!pp_funcs->get_sclk_od)
> -		return 0;
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
> -	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
> +	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
>  	mutex_unlock(&adev->pm.mutex);
> 
> -	return ret;
> +	return 0;
>  }
> 
>  int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (is_support_sw_smu(adev))
> -		return 0;
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
>  	if (pp_funcs->set_sclk_od)
> @@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct
> amdgpu_device *adev, uint32_t value)
>  	return 0;
>  }
> 
> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
> +			   uint32_t *value)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> -	int ret = 0;
> +
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> 
>  	if (!pp_funcs->get_mclk_od)
> -		return 0;
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
> -	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
> +	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
>  	mutex_unlock(&adev->pm.mutex);
> 
> -	return ret;
> +	return 0;
>  }
> 
>  int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t
> value)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (is_support_sw_smu(adev))
> -		return 0;
> +		return -EOPNOTSUPP;
> 
>  	mutex_lock(&adev->pm.mutex);
>  	if (pp_funcs->set_mclk_od)
> @@ -1043,6 +1165,9 @@ int amdgpu_dpm_get_power_profile_mode(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_power_profile_mode)
>  		return -EOPNOTSUPP;
> 
> @@ -1060,6 +1185,9 @@ int amdgpu_dpm_set_power_profile_mode(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_power_profile_mode)
>  		return 0;
> 
> @@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct
> amdgpu_device *adev, void **table)
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_gpu_metrics)
>  		return 0;
> 
> @@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_fan_control_mode)
>  		return -EOPNOTSUPP;
> 
> @@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_fan_speed_pwm)
>  		return -EOPNOTSUPP;
> 
> @@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_fan_speed_pwm)
>  		return -EOPNOTSUPP;
> 
> @@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_fan_speed_rpm)
>  		return -EOPNOTSUPP;
> 
> @@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_fan_speed_rpm)
>  		return -EOPNOTSUPP;
> 
> @@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_fan_control_mode)
>  		return -EOPNOTSUPP;
> 
> @@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_power_limit)
>  		return -ENODATA;
> 
> @@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_power_limit)
>  		return -EINVAL;
> 
> @@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct
> amdgpu_device *adev)
>  {
>  	bool cclk_dpm_supported = false;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return false;
> +
>  	if (!is_support_sw_smu(adev))
>  		return false;
> 
> @@ -1247,6 +1405,9 @@ int
> amdgpu_dpm_debugfs_print_current_performance_level(struct
> amdgpu_device *ade
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->debugfs_print_current_performance_level)
>  		return -EOPNOTSUPP;
> 
> @@ -1265,6 +1426,9 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_smu_prv_buf_details)
>  		return -ENOSYS;
> 
> @@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct
> amdgpu_device *adev)
>  	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
>  	struct smu_context *smu = adev->powerplay.pp_handle;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return false;
> +
>  	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
>  	    (is_support_sw_smu(adev) && smu->is_apu) ||
>  		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
> @@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_pp_table)
>  		return -EOPNOTSUPP;
> 
> @@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
> amdgpu_device *adev)
>  {
>  	struct smu_context *smu = adev->powerplay.pp_handle;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return INT_MAX;
> +
>  	if (!is_support_sw_smu(adev))
>  		return INT_MAX;
> 
> @@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
> amdgpu_device *adev)
> 
>  void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
>  {
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	if (!is_support_sw_smu(adev))
>  		return;
> 
> @@ -1333,6 +1509,9 @@ int
> amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->display_configuration_change)
>  		return 0;
> 
> @@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_clock_by_type)
>  		return 0;
> 
> @@ -1369,6 +1551,9 @@ int
> amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device
> *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_display_mode_validation_clocks)
>  		return 0;
> 
> @@ -1387,6 +1572,9 @@ int
> amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device
> *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_clock_by_type_with_latency)
>  		return 0;
> 
> @@ -1406,6 +1594,9 @@ int
> amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device
> *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_clock_by_type_with_voltage)
>  		return 0;
> 
> @@ -1424,6 +1615,9 @@ int
> amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device
> *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_watermarks_for_clocks_ranges)
>  		return -EOPNOTSUPP;
> 
> @@ -1441,6 +1635,9 @@ int
> amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->display_clock_voltage_request)
>  		return -EOPNOTSUPP;
> 
> @@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_current_clocks)
>  		return -EOPNOTSUPP;
> 
> @@ -1473,6 +1673,9 @@ void
> amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	if (!pp_funcs->notify_smu_enable_pwe)
>  		return;
> 
> @@ -1487,6 +1690,9 @@ int amdgpu_dpm_set_active_display_count(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_active_display_count)
>  		return -EOPNOTSUPP;
> 
> @@ -1504,6 +1710,9 @@ int
> amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->set_min_deep_sleep_dcefclk)
>  		return -EOPNOTSUPP;
> 
> @@ -1520,6 +1729,9 @@ void
> amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
>  		return;
> 
> @@ -1534,6 +1746,9 @@ void
> amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
>  {
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return;
> +
>  	if (!pp_funcs->set_hard_min_fclk_by_freq)
>  		return;
> 
> @@ -1549,6 +1764,9 @@ int
> amdgpu_dpm_display_disable_memory_clock_switch(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->display_disable_memory_clock_switch)
>  		return 0;
> 
> @@ -1566,6 +1784,9 @@ int
> amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device
> *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
>  		return -EOPNOTSUPP;
> 
> @@ -1584,6 +1805,9 @@ enum pp_smu_status
> amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_uclk_dpm_states)
>  		return -EOPNOTSUPP;
> 
> @@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct
> amdgpu_device *adev,
>  	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>  	int ret = 0;
> 
> +	if (!adev->pm.dpm_enabled)
> +		return -EOPNOTSUPP;
> +
>  	if (!pp_funcs->get_dpm_clock_table)
>  		return -EOPNOTSUPP;
> 
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> index b0243068212b..84aab3bb9bdc 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> @@ -273,11 +273,14 @@ static ssize_t
> amdgpu_get_power_dpm_force_performance_level(struct device *dev,
>  		return ret;
>  	}
> 
> -	level = amdgpu_dpm_get_performance_level(adev);
> +	ret = amdgpu_dpm_get_performance_level(adev, &level);
> 
>  	pm_runtime_mark_last_busy(ddev->dev);
>  	pm_runtime_put_autosuspend(ddev->dev);
> 
> +	if (ret)
> +		return ret;
> +
>  	return sysfs_emit(buf, "%s\n",
>  			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
>  			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> @@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct
> device *dev,
>  		return ret;
>  	}
> 
> -	value = amdgpu_dpm_get_sclk_od(adev);
> +	ret = amdgpu_dpm_get_sclk_od(adev, &value);
> 
>  	pm_runtime_mark_last_busy(ddev->dev);
>  	pm_runtime_put_autosuspend(ddev->dev);
> 
> +	if (ret)
> +		return ret;
> +
>  	return sysfs_emit(buf, "%d\n", value);
>  }
> 
> @@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct
> device *dev,
>  		return ret;
>  	}
> 
> -	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
> +	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
> 
>  	pm_runtime_mark_last_busy(ddev->dev);
>  	pm_runtime_put_autosuspend(ddev->dev);
> 
> +	if (ret)
> +		return ret;
> +
>  	return count;
>  }
> 
> @@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct
> device *dev,
>  		return ret;
>  	}
> 
> -	value = amdgpu_dpm_get_mclk_od(adev);
> +	ret = amdgpu_dpm_get_mclk_od(adev, &value);
> 
>  	pm_runtime_mark_last_busy(ddev->dev);
>  	pm_runtime_put_autosuspend(ddev->dev);
> 
> +	if (ret)
> +		return ret;
> +
>  	return sysfs_emit(buf, "%d\n", value);
>  }
> 
> @@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct
> device *dev,
>  		return ret;
>  	}
> 
> -	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
> +	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
> 
>  	pm_runtime_mark_last_busy(ddev->dev);
>  	pm_runtime_put_autosuspend(ddev->dev);
> 
> +	if (ret)
> +		return ret;
> +
>  	return count;
>  }
> 
> diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> index ddfa55b59d02..49488aebd350 100644
> --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> @@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct
> amdgpu_device *adev,
>  				 enum gfx_change_state state);
>  int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
>  			    void *umc_ecc);
> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
> amdgpu_device *adev,
> -						     uint32_t idx);
> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> +				   uint32_t idx,
> +				   struct amd_vce_state *vstate);
>  void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
> enum amd_pm_state_type *state);
>  void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>  				enum amd_pm_state_type state);
> -enum amd_dpm_forced_level
> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> +				     enum amd_dpm_forced_level *level);
>  int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>  				       enum amd_dpm_forced_level level);
>  int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
> @@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
> amdgpu_device *adev, char *buf);
>  int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
>  				 enum pp_clock_type type,
>  				 uint32_t mask);
> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t *value);
>  int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t *value);
>  int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
>  int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
>  				      char *buf);
> diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> index 9613c6181c17..59550617cf54 100644
> --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> @@ -959,10 +959,6 @@ static int
> amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
>  	int ret;
>  	bool equal = false;
> 
> -	/* if dpm init failed */
> -	if (!adev->pm.dpm_enabled)
> -		return 0;
> -
>  	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
>  		/* add other state override checks here */
>  		if ((!adev->pm.dpm.thermal_active) &&
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index 991ac4adb263..bba923cfe08c 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle,
> uint32_t msg_id)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
> @@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void
> *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (level == hwmgr->dpm_level)
> @@ -353,7 +353,7 @@ static enum amd_dpm_forced_level
> pp_dpm_get_performance_level(
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	return hwmgr->dpm_level;
> @@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool
> low)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return 0;
> 
>  	if (hwmgr->hwmgr_func->get_sclk == NULL) {
> @@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool
> low)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return 0;
> 
>  	if (hwmgr->hwmgr_func->get_mclk == NULL) {
> @@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle,
> bool gate)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return;
> 
>  	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
> @@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle,
> bool gate)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return;
> 
>  	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
> @@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle,
> enum amd_pp_task task_id,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	return hwmgr_handle_task(hwmgr, task_id, user_state);
> @@ -432,7 +432,7 @@ static enum amd_pm_state_type
> pp_dpm_get_current_power_state(void *handle)
>  	struct pp_power_state *state;
>  	enum amd_pm_state_type pm_type;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->current_ps)
> +	if (!hwmgr || !hwmgr->current_ps)
>  		return -EINVAL;
> 
>  	state = hwmgr->current_ps;
> @@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void
> *handle, uint32_t mode)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
> @@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void
> *handle, uint32_t *fan_mode)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
> @@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void
> *handle, uint32_t speed)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
> @@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void
> *handle, uint32_t *speed)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
> @@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle,
> uint32_t *rpm)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
> @@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle,
> uint32_t rpm)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
> @@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
> 
>  	memset(data, 0, sizeof(*data));
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!hwmgr->ps)
> +	if (!hwmgr || !hwmgr->ps)
>  		return -EINVAL;
> 
>  	data->nums = hwmgr->num_ps;
> @@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char
> **table)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!hwmgr->soft_pp_table)
> +	if (!hwmgr || !hwmgr->soft_pp_table)
>  		return -EINVAL;
> 
>  	*table = (char *)hwmgr->soft_pp_table;
> @@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle, const
> char *buf, size_t size)
>  	struct pp_hwmgr *hwmgr = handle;
>  	int ret = -ENOMEM;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (!hwmgr->hardcode_pp_table) {
> @@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
> @@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
> @@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
> @@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t
> value)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
> @@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
> @@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle,
> uint32_t value)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
> @@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !value)
> +	if (!hwmgr || !value)
>  		return -EINVAL;
> 
>  	switch (idx) {
> @@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle,
> unsigned idx)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return NULL;
> 
>  	if (idx < hwmgr->num_vce_state_tables)
> @@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void *handle,
> char *buf)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
> +	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
>  		return -EOPNOTSUPP;
>  	if (!buf)
>  		return -EINVAL;
> @@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void *handle,
> long *input, uint32_t size)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
> +	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
>  		return -EOPNOTSUPP;
> 
>  	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> @@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle,
> uint32_t type, long *input, u
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
> @@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle,
> uint32_t type, long *input, uint3
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
> @@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void
> *handle,
>  	long workload;
>  	uint32_t index;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
> @@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle, uint32_t
> limit)
>  	struct pp_hwmgr *hwmgr = handle;
>  	uint32_t max_power_limit;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
> @@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle, uint32_t
> *limit,
>  	struct pp_hwmgr *hwmgr = handle;
>  	int ret = 0;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!limit)
> +	if (!hwmgr || !limit)
>  		return -EINVAL;
> 
>  	if (power_type != PP_PWR_TYPE_SUSTAINED)
> @@ -965,7 +965,7 @@ static int pp_display_configuration_change(void
> *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	phm_store_dal_configuration_data(hwmgr, display_config);
> @@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!output)
> +	if (!hwmgr || !output)
>  		return -EINVAL;
> 
>  	return phm_get_dal_power_level(hwmgr, output);
> @@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
>  	struct pp_hwmgr *hwmgr = handle;
>  	int ret = 0;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	phm_get_dal_power_level(hwmgr, &simple_clocks);
> @@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle,
> enum amd_pp_clock_type type, struc
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (clocks == NULL)
> @@ -1050,7 +1050,7 @@ static int pp_get_clock_by_type_with_latency(void
> *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>  		return -EINVAL;
> 
>  	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> @@ -1062,7 +1062,7 @@ static int pp_get_clock_by_type_with_voltage(void
> *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>  		return -EINVAL;
> 
>  	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> @@ -1073,7 +1073,7 @@ static int
> pp_set_watermarks_for_clocks_ranges(void *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !clock_ranges)
> +	if (!hwmgr || !clock_ranges)
>  		return -EINVAL;
> 
>  	return phm_set_watermarks_for_clocks_ranges(hwmgr,
> @@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void
> *handle,
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clock)
> +	if (!hwmgr || !clock)
>  		return -EINVAL;
> 
>  	return phm_display_clock_voltage_request(hwmgr, clock);
> @@ -1097,7 +1097,7 @@ static int
> pp_get_display_mode_validation_clocks(void *handle,
>  	struct pp_hwmgr *hwmgr = handle;
>  	int ret = 0;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> +	if (!hwmgr || !clocks)
>  		return -EINVAL;
> 
>  	clocks->level = PP_DAL_POWERLEVEL_7;
> @@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void
> *handle)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
> @@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle,
> bool gate)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return 0;
> 
>  	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
> @@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle,
> bool gate)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return;
> 
>  	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
> @@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void
> *handle)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
> @@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void
> *handle)
>  	if (!hwmgr)
>  		return -EINVAL;
> 
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
> -	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> +	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
>  		return 0;
> 
>  	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
> @@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void
> *handle, uint32_t clock)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
> @@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void
> *handle, uint32_t clock)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL)
> {
> @@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
> @@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void
> *handle, uint32_t count)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	return phm_set_active_display_count(hwmgr, count);
> @@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle,
> char *buf)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !buf)
> +	if (!hwmgr || !buf)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
> @@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle,
> uint64_t ppfeature_masks)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
> @@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle,
> bool acquire)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
> @@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum
> pp_df_cstate state)
>  	if (!hwmgr)
>  		return -EINVAL;
> 
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->set_df_cstate)
> +	if (!hwmgr->hwmgr_func->set_df_cstate)
>  		return 0;
> 
>  	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
> @@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate)
>  	if (!hwmgr)
>  		return -EINVAL;
> 
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->set_xgmi_pstate)
> +	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
>  		return 0;
> 
>  	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
> @@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle,
> void **table)
>  	if (!hwmgr)
>  		return -EINVAL;
> 
> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->get_gpu_metrics)
> +	if (!hwmgr->hwmgr_func->get_gpu_metrics)
>  		return -EOPNOTSUPP;
> 
>  	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> @@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle,
> uint32_t state)
>  {
>  	struct pp_hwmgr *hwmgr = handle;
> 
> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> +	if (!hwmgr)
>  		return -EINVAL;
> 
>  	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 96a3388c2cb7..97c57a6cf314 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void *handle,
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	return smu_get_pp_feature_mask(smu, buf);
>  }
> 
> @@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void *handle,
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	return smu_set_pp_feature_mask(smu, new_mask);
>  }
> 
> @@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled) {
> -		dev_WARN(smu->adev->dev,
> -			 "SMU uninitialized but power %s requested
> for %u!\n",
> -			 gate ? "gate" : "ungate", block_type);
> -		return -EOPNOTSUPP;
> -	}
> -
>  	switch (block_type) {
>  	/*
>  	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
> @@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct
> smu_context *smu)
>  	if (!smu->adev->in_suspend)
>  		return;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return;
> -
>  	/* Enable restore flag */
>  	smu->user_dpm_profile.flags |=
> SMU_DPM_USER_PROFILE_RESTORE;
> 
> @@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
>  	struct smu_context *smu = handle;
>  	struct smu_table_context *smu_table = &smu->smu_table;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu_table->power_play_table && !smu_table-
> >hardcode_pptable)
>  		return -EINVAL;
> 
> @@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
>  	ATOM_COMMON_TABLE_HEADER *header =
> (ATOM_COMMON_TABLE_HEADER *)buf;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (header->usStructureSize != size) {
>  		dev_err(smu->adev->dev, "pp table size not matched !\n");
>  		return -EIO;
> @@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void
> *handle,
>  	int index = 0;
>  	int num_of_active_display = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!display_config)
>  		return -EINVAL;
> 
> @@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context
> *smu,
>  {
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	switch (task_id) {
>  	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
>  		ret = smu_pre_display_config_changed(smu);
> @@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void *handle,
>  	long workload;
>  	uint32_t index;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
>  		return -EINVAL;
> 
> @@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle)
>  	struct smu_context *smu = handle;
>  	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>  		return -EINVAL;
> 
> @@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void
> *handle,
>  	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>  		return -EINVAL;
> 
> @@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle,
> uint32_t count)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	return smu_init_display_count(smu, count);
>  }
> 
> @@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct
> smu_context *smu,
>  	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_MANUAL) {
>  		dev_dbg(smu->adev->dev, "force clock level is for dpm
> manual mode only.\n");
>  		return -EINVAL;
> @@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
>  		return 0;
> 
> @@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
>  {
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
>  		return 0;
> 
> @@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
>  	return ret;
>  }
> 
> -int smu_write_watermarks_table(struct smu_context *smu)
> -{
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
> -	return smu_set_watermarks_table(smu, NULL);
> -}
> -
>  static int smu_set_watermarks_for_clock_ranges(void *handle,
>  					       struct pp_smu_wm_range_sets
> *clock_ranges)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->disable_watermark)
>  		return 0;
> 
> @@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
>  {
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	/* controlled by firmware */
>  	if (smu->dc_controlled_by_gpio)
>  		return 0;
> @@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle,
> uint32_t speed)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->set_fan_speed_rpm)
>  		return -EOPNOTSUPP;
> 
> @@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
>  	uint32_t limit_type;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	switch(pp_power_type) {
>  	case PP_PWR_TYPE_SUSTAINED:
>  		limit_type = SMU_DEFAULT_PPT_LIMIT;
> @@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
>  	uint32_t limit_type = limit >> 24;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	limit &= (1<<24)-1;
>  	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
>  		if (smu->ppt_funcs->set_power_limit)
> @@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct
> smu_context *smu, enum smu_clk_type cl
>  {
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->print_clk_levels)
>  		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
> 
> @@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->od_edit_dpm_table) {
>  		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input,
> size);
>  	}
> @@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
>  	int ret = 0;
>  	uint32_t *size, size_val;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!data || !size_arg)
>  		return -EINVAL;
> 
> @@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void
> *handle, char *buf)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled ||
> -	    !smu->ppt_funcs->get_power_profile_mode)
> +	if (!smu->ppt_funcs->get_power_profile_mode)
>  		return -EOPNOTSUPP;
>  	if (!buf)
>  		return -EINVAL;
> @@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void
> *handle,
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled ||
> -	    !smu->ppt_funcs->set_power_profile_mode)
> +	if (!smu->ppt_funcs->set_power_profile_mode)
>  		return -EOPNOTSUPP;
> 
>  	return smu_bump_power_profile_mode(smu, param, param_size);
> @@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void *handle,
> u32 *fan_mode)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->get_fan_control_mode)
>  		return -EOPNOTSUPP;
> 
> @@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void *handle,
> u32 value)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->set_fan_control_mode)
>  		return -EOPNOTSUPP;
> 
> @@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void *handle,
> u32 *speed)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->get_fan_speed_pwm)
>  		return -EOPNOTSUPP;
> 
> @@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle,
> u32 speed)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->set_fan_speed_pwm)
>  		return -EOPNOTSUPP;
> 
> @@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle,
> uint32_t *speed)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->get_fan_speed_rpm)
>  		return -EOPNOTSUPP;
> 
> @@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void
> *handle, uint32_t clk)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	return smu_set_min_dcef_deep_sleep(smu, clk);
>  }
> 
> @@ -2556,9 +2455,6 @@ static int
> smu_get_clock_by_type_with_latency(void *handle,
>  	enum smu_clk_type clk_type;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
>  		switch (type) {
>  		case amd_pp_sys_clock:
> @@ -2590,9 +2486,6 @@ static int smu_display_clock_voltage_request(void
> *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->display_clock_voltage_request)
>  		ret = smu->ppt_funcs->display_clock_voltage_request(smu,
> clock_req);
> 
> @@ -2606,9 +2499,6 @@ static int
> smu_display_disable_memory_clock_switch(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = -EINVAL;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->display_disable_memory_clock_switch)
>  		ret = smu->ppt_funcs-
> >display_disable_memory_clock_switch(smu,
> disable_memory_clock_switch);
> 
> @@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->set_xgmi_pstate)
>  		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
> 
> @@ -2722,9 +2609,6 @@ static int
> smu_get_max_sustainable_clocks_by_dc(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
>  		ret = smu->ppt_funcs-
> >get_max_sustainable_clocks_by_dc(smu, max_clocks);
> 
> @@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->get_uclk_dpm_states)
>  		ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
> clock_values_in_khz, num_states);
> 
> @@ -2752,9 +2633,6 @@ static enum amd_pm_state_type
> smu_get_current_power_state(void *handle)
>  	struct smu_context *smu = handle;
>  	enum amd_pm_state_type pm_state =
> POWER_STATE_TYPE_DEFAULT;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->get_current_power_state)
>  		pm_state = smu->ppt_funcs-
> >get_current_power_state(smu);
> 
> @@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void *handle,
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->get_dpm_clock_table)
>  		ret = smu->ppt_funcs->get_dpm_clock_table(smu,
> clock_table);
> 
> @@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void
> *handle, void **table)
>  {
>  	struct smu_context *smu = handle;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (!smu->ppt_funcs->get_gpu_metrics)
>  		return -EOPNOTSUPP;
> 
> @@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void
> *handle)
>  	struct smu_context *smu = handle;
>  	int ret = 0;
> 
> -	if (!smu->adev->pm.dpm_enabled)
> -		return -EOPNOTSUPP;
> -
>  	if (smu->ppt_funcs->enable_mgpu_fan_boost)
>  		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 39d169440d15..bced761f3f96 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
> 
>  bool is_support_sw_smu(struct amdgpu_device *adev);
>  bool is_support_cclk_dpm(struct amdgpu_device *adev);
> -int smu_write_watermarks_table(struct smu_context *smu);
> 
>  int smu_get_dpm_freq_range(struct smu_context *smu, enum
> smu_clk_type clk_type,
>  			   uint32_t *min, uint32_t *max);
> --
> 2.29.0

^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-11 13:39   ` Lazar, Lijo
@ 2022-02-17  2:35     ` Quan, Evan
  2022-02-17  4:55       ` Lazar, Lijo
  0 siblings, 1 reply; 23+ messages in thread
From: Quan, Evan @ 2022-02-17  2:35 UTC (permalink / raw)
  To: Lazar, Lijo, amd-gfx; +Cc: Deucher, Alexander, rui.huang

[AMD Official Use Only]



> -----Original Message-----
> From: Lazar, Lijo <Lijo.Lazar@amd.com>
> Sent: Friday, February 11, 2022 9:40 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>;
> rui.huang@amd.com
> Subject: Re: [PATCH 05/12] drm/amd/pm: move the check for dpm
> enablement to amdgpu_dpm.c
> 
> 
> 
> On 2/11/2022 1:22 PM, Evan Quan wrote:
> > Instead of checking this in every instance(framework), moving that check
> to
> > amdgpu_dpm.c is more proper. And that can make code clean and tidy.
> >
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
> >   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277
> ++++++++++++++++--
> >   drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
> >   drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
> >   .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
> >   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
> >   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
> >   drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
> >   9 files changed, 352 insertions(+), 240 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > index 2c929fa40379..fff0e6a3882e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> > @@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct
> amdgpu_ctx *ctx,
> >   {
> >   	struct amdgpu_device *adev = ctx->adev;
> >   	enum amd_dpm_forced_level current_level;
> > +	int ret = 0;
> >
> >   	if (!ctx)
> >   		return -EINVAL;
> >
> > -	current_level = amdgpu_dpm_get_performance_level(adev);
> > +	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
> > +	if (ret)
> > +		return ret;
> >
> >   	switch (current_level) {
> >   	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > index 9f985bd463be..56144f25b720 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > @@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev,
> void *data, struct drm_file *filp)
> >   		unsigned i;
> >   		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
> >   		struct amd_vce_state *vce_state;
> > +		int ret = 0;
> >
> >   		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
> > -			vce_state =
> amdgpu_dpm_get_vce_clock_state(adev, i);
> > -			if (vce_state) {
> > -				vce_clk_table.entries[i].sclk = vce_state-
> >sclk;
> > -				vce_clk_table.entries[i].mclk = vce_state-
> >mclk;
> > -				vce_clk_table.entries[i].eclk = vce_state-
> >evclk;
> > -				vce_clk_table.num_valid_entries++;
> > -			}
> > +			ret = amdgpu_dpm_get_vce_clock_state(adev, i,
> vce_state);
> > +			if (ret)
> > +				return ret;
> > +
> > +			vce_clk_table.entries[i].sclk = vce_state->sclk;
> > +			vce_clk_table.entries[i].mclk = vce_state->mclk;
> > +			vce_clk_table.entries[i].eclk = vce_state->evclk;
> > +			vce_clk_table.num_valid_entries++;
> >   		}
> >
> >   		return copy_to_user(out, &vce_clk_table,
> > diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > index 1d63f1e8884c..b46ae0063047 100644
> > --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > @@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device
> *adev, bool low)
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return 0;
> > +
> >   	if (!pp_funcs->get_sclk)
> >   		return 0;
> >
> > @@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device
> *adev, bool low)
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return 0;
> > +
> >   	if (!pp_funcs->get_mclk)
> >   		return 0;
> >
> > @@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct
> amdgpu_device *adev, uint32_t block
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF :
> POWER_STATE_ON;
> >
> > +	if (!adev->pm.dpm_enabled) {
> > +		dev_WARN(adev->dev,
> > +			 "SMU uninitialized but power %s requested
> for %u!\n",
> > +			 gate ? "gate" : "ungate", block_type);
> > +		return -EOPNOTSUPP;
> > +	}
> > +
> >   	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
> >   		dev_dbg(adev->dev, "IP block%d already in the target %s
> state!",
> >   				block_type, gate ? "gate" : "ungate");
> > @@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (amdgpu_sriov_vf(adev))
> >   		return 0;
> >
> > @@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = pp_funcs->set_xgmi_pstate(adev-
> >powerplay.pp_handle,
> > @@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	void *pp_handle = adev->powerplay.pp_handle;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (pp_funcs && pp_funcs->set_df_cstate) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
> > @@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct
> amdgpu_device *adev, bool en)
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (is_support_sw_smu(adev)) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = smu_allow_xgmi_power_down(smu, en);
> > @@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct
> amdgpu_device *adev)
> >   			adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
> > @@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct
> amdgpu_device *adev,
> >   			adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
> > @@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct
> amdgpu_device *adev,
> >   			adev->powerplay.pp_funcs;
> >   	int ret = -EOPNOTSUPP;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> 
> I2C bus access doesn't need DPM to be enabled.
[Quan, Evan] The "adev->pm.dpm_enabled" flag is a little confusing. It does not actually mean that DPM features are enabled.
It only indicates that SMU IP initialization has fully completed and that the API is designed to be supported in that state.

An API only needs to work without the "adev->pm.dpm_enabled" guard if it must be available in one of the following scenarios:
- DPM is explicitly disabled (module parameter "dpm=0")
  - some APU-related initial setup (smu_dpm_set_vcn/jpeg_enable) falls into this scenario
- Deinitialization has been performed but reinitialization has not yet kicked in (e.g. resuming from suspend)
  - the gpu reset related APIs fall into this scenario

I cannot see any reason to support the I2C bus access API under either of those two scenarios,
so I think the "adev->pm.dpm_enabled" guard is reasonable here.
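For reference, a condensed sketch of how the I2C wrapper ends up looking with this patch applied (restated from the hunks quoted above, nothing new beyond the comment):

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	/*
	 * dpm_enabled here only means "SMU init fully completed and not
	 * torn down for suspend/reset", not that DPM features are in use.
	 */
	if (!adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle, acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}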
> 
> >   	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
> > @@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct
> amdgpu_device *adev, enum amd_pp_sensors senso
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = -EINVAL;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!data || !size)
> >   		return -EINVAL;
> >
> > @@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct
> amdgpu_device *adev, bool enable)
> >   {
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> 
> Please double check on this one also.
> 
> >   	if (is_support_sw_smu(adev)) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = smu_handle_passthrough_sbr(adev-
> >powerplay.pp_handle,
> > @@ -500,6 +540,9 @@ int
> amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev,
> uint32_t size)
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	mutex_lock(&adev->pm.mutex);
> >   	ret = smu_send_hbm_bad_pages_num(smu, size);
> >   	mutex_unlock(&adev->pm.mutex);
> > @@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct
> amdgpu_device *adev,
> >   {
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (type != PP_SCLK)
> >   		return -EINVAL;
> >
> > @@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct
> amdgpu_device *adev,
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (type != PP_SCLK)
> >   		return -EINVAL;
> >
> > @@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct
> amdgpu_device *adev,
> >
> >   int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
> >   {
> > -	struct smu_context *smu = adev->powerplay.pp_handle;
> > +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return 0;
> >
> >   	mutex_lock(&adev->pm.mutex);
> > -	ret = smu_write_watermarks_table(smu);
> > +	ret = pp_funcs->set_watermarks_for_clock_ranges(adev-
> >powerplay.pp_handle,
> > +							NULL);
> >   	mutex_unlock(&adev->pm.mutex);
> >
> >   	return ret;
> > @@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct
> amdgpu_device *adev,
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> 
> In this case also DPM doesn't need to be enabled.
[Quan, Evan] This seems to be used by mode2 reset only. Maybe the "amdgpu_dpm_is_smc_alive" guard implemented in patch 7 would be more appropriate here.
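If we go that route, the reset-path wrappers would only need the guard swapped, along these lines (rough sketch; amdgpu_dpm_is_smc_alive() comes from patch 7, which is not quoted in this thread, so its exact form is an assumption):

	/*
	 * Assumption: amdgpu_dpm_is_smc_alive() (patch 7) takes adev and
	 * returns bool; only the call pattern is illustrated here.
	 */
	if (!amdgpu_dpm_is_smc_alive(adev))
		return -EOPNOTSUPP;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;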
> 
> In general this patch assumes DPM interfaces to continue. There was a
> discussion around getting rid of dpm and moving to smu component based
> interface. This patch goes in the opposite direction.
[Quan, Evan] No, they do not conflict. We can still move in that direction.
I just do not want to leave the swsmu based interfaces in an intermediate state (some carrying the "dpm_enabled" guard in amdgpu_dpm.c while others keep it in amdgpu_smu.c).
That was my only consideration.

BR
Evan
> 
> Thanks,
> Lijo
> 
> >   	if (!is_support_sw_smu(adev))
> >   		return -EOPNOTSUPP;
> >
> > @@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct
> amdgpu_device *adev, uint32_t *value)
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return -EOPNOTSUPP;
> >
> > @@ -605,6 +664,9 @@ uint64_t
> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
> *adev)
> >   {
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return 0;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return 0;
> >
> > @@ -619,6 +681,9 @@ uint64_t
> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
> *adev)
> >   void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
> >   				 enum gfx_change_state state)
> >   {
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	mutex_lock(&adev->pm.mutex);
> >   	if (adev->powerplay.pp_funcs &&
> >   	    adev->powerplay.pp_funcs->gfx_state_change_set)
> > @@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct
> amdgpu_device *adev,
> >   {
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return -EOPNOTSUPP;
> >
> >   	return smu_get_ecc_info(smu, umc_ecc);
> >   }
> >
> > -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
> amdgpu_device *adev,
> > -						     uint32_t idx)
> > +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> > +				   uint32_t idx,
> > +				   struct amd_vce_state *vstate)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	struct amd_vce_state *vstate = NULL;
> > +
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> >
> >   	if (!pp_funcs->get_vce_clock_state)
> > -		return NULL;
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> >   	vstate = pp_funcs->get_vce_clock_state(adev-
> >powerplay.pp_handle,
> >   					       idx);
> >   	mutex_unlock(&adev->pm.mutex);
> >
> > -	return vstate;
> > +	return 0;
> >   }
> >
> >   void amdgpu_dpm_get_current_power_state(struct amdgpu_device
> *adev,
> > @@ -660,6 +731,9 @@ void
> amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	mutex_lock(&adev->pm.mutex);
> >
> >   	if (!pp_funcs->get_current_power_state) {
> > @@ -679,6 +753,9 @@ void
> amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
> >   void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
> >   				enum amd_pm_state_type state)
> >   {
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	mutex_lock(&adev->pm.mutex);
> >   	adev->pm.dpm.user_state = state;
> >   	mutex_unlock(&adev->pm.mutex);
> > @@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct
> amdgpu_device *adev,
> >   		amdgpu_dpm_compute_clocks(adev);
> >   }
> >
> > -enum amd_dpm_forced_level
> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
> > +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> > +				     enum amd_dpm_forced_level *level)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	enum amd_dpm_forced_level level;
> > +
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> >   	if (pp_funcs->get_performance_level)
> > -		level = pp_funcs->get_performance_level(adev-
> >powerplay.pp_handle);
> > +		*level = pp_funcs->get_performance_level(adev-
> >powerplay.pp_handle);
> >   	else
> > -		level = adev->pm.dpm.forced_level;
> > +		*level = adev->pm.dpm.forced_level;
> >   	mutex_unlock(&adev->pm.mutex);
> >
> > -	return level;
> > +	return 0;
> >   }
> >
> >   int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
> > @@ -717,13 +797,16 @@ int
> amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
> >
> 	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
> >
> 	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->force_performance_level)
> >   		return 0;
> >
> >   	if (adev->pm.dpm.thermal_active)
> >   		return -EINVAL;
> >
> > -	current_level = amdgpu_dpm_get_performance_level(adev);
> > +	amdgpu_dpm_get_performance_level(adev, &current_level);
> >   	if (current_level == level)
> >   		return 0;
> >
> > @@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_pp_num_states)
> >   		return -EOPNOTSUPP;
> >
> > @@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->dispatch_tasks)
> >   		return -EOPNOTSUPP;
> >
> > @@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct
> amdgpu_device *adev, char **table)
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_pp_table)
> >   		return 0;
> >
> > @@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_fine_grain_clk_vol)
> >   		return 0;
> >
> > @@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->odn_edit_dpm_table)
> >   		return 0;
> >
> > @@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->print_clock_levels)
> >   		return 0;
> >
> > @@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_ppfeature_status)
> >   		return 0;
> >
> > @@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
> amdgpu_device *adev, char *buf)
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_ppfeature_status)
> >   		return 0;
> >
> > @@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->force_clock_level)
> >   		return 0;
> >
> > @@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct
> amdgpu_device *adev,
> >   	return ret;
> >   }
> >
> > -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
> > +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
> > +			   uint32_t *value)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	int ret = 0;
> > +
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> >
> >   	if (!pp_funcs->get_sclk_od)
> > -		return 0;
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> > -	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
> > +	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
> >   	mutex_unlock(&adev->pm.mutex);
> >
> > -	return ret;
> > +	return 0;
> >   }
> >
> >   int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t
> value)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (is_support_sw_smu(adev))
> > -		return 0;
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> >   	if (pp_funcs->set_sclk_od)
> > @@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct
> amdgpu_device *adev, uint32_t value)
> >   	return 0;
> >   }
> >
> > -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
> > +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
> > +			   uint32_t *value)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	int ret = 0;
> > +
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> >
> >   	if (!pp_funcs->get_mclk_od)
> > -		return 0;
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> > -	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
> > +	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
> >   	mutex_unlock(&adev->pm.mutex);
> >
> > -	return ret;
> > +	return 0;
> >   }
> >
> >   int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t
> value)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (is_support_sw_smu(adev))
> > -		return 0;
> > +		return -EOPNOTSUPP;
> >
> >   	mutex_lock(&adev->pm.mutex);
> >   	if (pp_funcs->set_mclk_od)
> > @@ -1043,6 +1165,9 @@ int
> amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_power_profile_mode)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1060,6 +1185,9 @@ int
> amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_power_profile_mode)
> >   		return 0;
> >
> > @@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct
> amdgpu_device *adev, void **table)
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_gpu_metrics)
> >   		return 0;
> >
> > @@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_fan_control_mode)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_fan_speed_pwm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_fan_speed_pwm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_fan_speed_rpm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_fan_speed_rpm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_fan_control_mode)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_power_limit)
> >   		return -ENODATA;
> >
> > @@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_power_limit)
> >   		return -EINVAL;
> >
> > @@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct
> amdgpu_device *adev)
> >   {
> >   	bool cclk_dpm_supported = false;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return false;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return false;
> >
> > @@ -1247,6 +1405,9 @@ int
> amdgpu_dpm_debugfs_print_current_performance_level(struct
> amdgpu_device *ade
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->debugfs_print_current_performance_level)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1265,6 +1426,9 @@ int
> amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_smu_prv_buf_details)
> >   		return -ENOSYS;
> >
> > @@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct
> amdgpu_device *adev)
> >   	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return false;
> > +
> >   	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
> >   	    (is_support_sw_smu(adev) && smu->is_apu) ||
> >   		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
> > @@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_pp_table)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
> amdgpu_device *adev)
> >   {
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return INT_MAX;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return INT_MAX;
> >
> > @@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
> amdgpu_device *adev)
> >
> >   void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
> >   {
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	if (!is_support_sw_smu(adev))
> >   		return;
> >
> > @@ -1333,6 +1509,9 @@ int
> amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->display_configuration_change)
> >   		return 0;
> >
> > @@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_clock_by_type)
> >   		return 0;
> >
> > @@ -1369,6 +1551,9 @@ int
> amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device
> *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_display_mode_validation_clocks)
> >   		return 0;
> >
> > @@ -1387,6 +1572,9 @@ int
> amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device
> *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_clock_by_type_with_latency)
> >   		return 0;
> >
> > @@ -1406,6 +1594,9 @@ int
> amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device
> *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_clock_by_type_with_voltage)
> >   		return 0;
> >
> > @@ -1424,6 +1615,9 @@ int
> amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device
> *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_watermarks_for_clocks_ranges)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1441,6 +1635,9 @@ int
> amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->display_clock_voltage_request)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_current_clocks)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1473,6 +1673,9 @@ void
> amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	if (!pp_funcs->notify_smu_enable_pwe)
> >   		return;
> >
> > @@ -1487,6 +1690,9 @@ int
> amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_active_display_count)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1504,6 +1710,9 @@ int
> amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->set_min_deep_sleep_dcefclk)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1520,6 +1729,9 @@ void
> amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
> >   		return;
> >
> > @@ -1534,6 +1746,9 @@ void
> amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return;
> > +
> >   	if (!pp_funcs->set_hard_min_fclk_by_freq)
> >   		return;
> >
> > @@ -1549,6 +1764,9 @@ int
> amdgpu_dpm_display_disable_memory_clock_switch(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->display_disable_memory_clock_switch)
> >   		return 0;
> >
> > @@ -1566,6 +1784,9 @@ int
> amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device
> *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1584,6 +1805,9 @@ enum pp_smu_status
> amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_uclk_dpm_states)
> >   		return -EOPNOTSUPP;
> >
> > @@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct
> amdgpu_device *adev,
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	int ret = 0;
> >
> > +	if (!adev->pm.dpm_enabled)
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs->get_dpm_clock_table)
> >   		return -EOPNOTSUPP;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> > index b0243068212b..84aab3bb9bdc 100644
> > --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> > +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> > @@ -273,11 +273,14 @@ static ssize_t
> amdgpu_get_power_dpm_force_performance_level(struct device *dev,
> >   		return ret;
> >   	}
> >
> > -	level = amdgpu_dpm_get_performance_level(adev);
> > +	ret = amdgpu_dpm_get_performance_level(adev, &level);
> >
> >   	pm_runtime_mark_last_busy(ddev->dev);
> >   	pm_runtime_put_autosuspend(ddev->dev);
> >
> > +	if (ret)
> > +		return ret;
> > +
> >   	return sysfs_emit(buf, "%s\n",
> >   			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
> "auto" :
> >   			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> > @@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct
> device *dev,
> >   		return ret;
> >   	}
> >
> > -	value = amdgpu_dpm_get_sclk_od(adev);
> > +	ret = amdgpu_dpm_get_sclk_od(adev, &value);
> >
> >   	pm_runtime_mark_last_busy(ddev->dev);
> >   	pm_runtime_put_autosuspend(ddev->dev);
> >
> > +	if (ret)
> > +		return ret;
> > +
> >   	return sysfs_emit(buf, "%d\n", value);
> >   }
> >
> > @@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct
> device *dev,
> >   		return ret;
> >   	}
> >
> > -	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
> > +	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
> >
> >   	pm_runtime_mark_last_busy(ddev->dev);
> >   	pm_runtime_put_autosuspend(ddev->dev);
> >
> > +	if (ret)
> > +		return ret;
> > +
> >   	return count;
> >   }
> >
> > @@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct
> device *dev,
> >   		return ret;
> >   	}
> >
> > -	value = amdgpu_dpm_get_mclk_od(adev);
> > +	ret = amdgpu_dpm_get_mclk_od(adev, &value);
> >
> >   	pm_runtime_mark_last_busy(ddev->dev);
> >   	pm_runtime_put_autosuspend(ddev->dev);
> >
> > +	if (ret)
> > +		return ret;
> > +
> >   	return sysfs_emit(buf, "%d\n", value);
> >   }
> >
> > @@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct
> device *dev,
> >   		return ret;
> >   	}
> >
> > -	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
> > +	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
> >
> >   	pm_runtime_mark_last_busy(ddev->dev);
> >   	pm_runtime_put_autosuspend(ddev->dev);
> >
> > +	if (ret)
> > +		return ret;
> > +
> >   	return count;
> >   }
> >
> > diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > index ddfa55b59d02..49488aebd350 100644
> > --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > @@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct
> amdgpu_device *adev,
> >   				 enum gfx_change_state state);
> >   int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
> >   			    void *umc_ecc);
> > -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
> amdgpu_device *adev,
> > -						     uint32_t idx);
> > +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
> > +				   uint32_t idx,
> > +				   struct amd_vce_state *vstate);
> >   void amdgpu_dpm_get_current_power_state(struct amdgpu_device
> *adev, enum amd_pm_state_type *state);
> >   void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
> >   				enum amd_pm_state_type state);
> > -enum amd_dpm_forced_level
> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
> > +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
> > +				     enum amd_dpm_forced_level *level);
> >   int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
> >   				       enum amd_dpm_forced_level level);
> >   int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
> > @@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
> amdgpu_device *adev, char *buf);
> >   int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
> >   				 enum pp_clock_type type,
> >   				 uint32_t mask);
> > -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
> > +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t
> *value);
> >   int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t
> value);
> > -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
> > +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t
> *value);
> >   int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t
> value);
> >   int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device
> *adev,
> >   				      char *buf);
> > diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> > index 9613c6181c17..59550617cf54 100644
> > --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> > +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> > @@ -959,10 +959,6 @@ static int
> amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
> >   	int ret;
> >   	bool equal = false;
> >
> > -	/* if dpm init failed */
> > -	if (!adev->pm.dpm_enabled)
> > -		return 0;
> > -
> >   	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
> >   		/* add other state override checks here */
> >   		if ((!adev->pm.dpm.thermal_active) &&
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > index 991ac4adb263..bba923cfe08c 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > @@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle,
> uint32_t msg_id)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
> > @@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void
> *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (level == hwmgr->dpm_level)
> > @@ -353,7 +353,7 @@ static enum amd_dpm_forced_level
> pp_dpm_get_performance_level(
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	return hwmgr->dpm_level;
> > @@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool
> low)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return 0;
> >
> >   	if (hwmgr->hwmgr_func->get_sclk == NULL) {
> > @@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle,
> bool low)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return 0;
> >
> >   	if (hwmgr->hwmgr_func->get_mclk == NULL) {
> > @@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle,
> bool gate)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return;
> >
> >   	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
> > @@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle,
> bool gate)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return;
> >
> >   	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
> > @@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle,
> enum amd_pp_task task_id,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	return hwmgr_handle_task(hwmgr, task_id, user_state);
> > @@ -432,7 +432,7 @@ static enum amd_pm_state_type
> pp_dpm_get_current_power_state(void *handle)
> >   	struct pp_power_state *state;
> >   	enum amd_pm_state_type pm_type;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->current_ps)
> > +	if (!hwmgr || !hwmgr->current_ps)
> >   		return -EINVAL;
> >
> >   	state = hwmgr->current_ps;
> > @@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void
> *handle, uint32_t mode)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
> > @@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void
> *handle, uint32_t *fan_mode)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
> > @@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void
> *handle, uint32_t speed)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
> > @@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void
> *handle, uint32_t *speed)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
> > @@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void
> *handle, uint32_t *rpm)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
> > @@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void
> *handle, uint32_t rpm)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
> > @@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void
> *handle,
> >
> >   	memset(data, 0, sizeof(*data));
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!hwmgr->ps)
> > +	if (!hwmgr || !hwmgr->ps)
> >   		return -EINVAL;
> >
> >   	data->nums = hwmgr->num_ps;
> > @@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char
> **table)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!hwmgr->soft_pp_table)
> > +	if (!hwmgr || !hwmgr->soft_pp_table)
> >   		return -EINVAL;
> >
> >   	*table = (char *)hwmgr->soft_pp_table;
> > @@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle,
> const char *buf, size_t size)
> >   	struct pp_hwmgr *hwmgr = handle;
> >   	int ret = -ENOMEM;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (!hwmgr->hardcode_pp_table) {
> > @@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
> > @@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
> > @@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
> > @@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle,
> uint32_t value)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
> > @@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
> > @@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle,
> uint32_t value)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
> > @@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int
> idx,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !value)
> > +	if (!hwmgr || !value)
> >   		return -EINVAL;
> >
> >   	switch (idx) {
> > @@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle,
> unsigned idx)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return NULL;
> >
> >   	if (idx < hwmgr->num_vce_state_tables)
> > @@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void
> *handle, char *buf)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
> > +	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
> >   		return -EOPNOTSUPP;
> >   	if (!buf)
> >   		return -EINVAL;
> > @@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void
> *handle, long *input, uint32_t size)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
> > +	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
> >   		return -EOPNOTSUPP;
> >
> >   	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> > @@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle,
> uint32_t type, long *input, u
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
> > @@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle,
> uint32_t type, long *input, uint3
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
> > @@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void
> *handle,
> >   	long workload;
> >   	uint32_t index;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
> > @@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle,
> uint32_t limit)
> >   	struct pp_hwmgr *hwmgr = handle;
> >   	uint32_t max_power_limit;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
> > @@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle,
> uint32_t *limit,
> >   	struct pp_hwmgr *hwmgr = handle;
> >   	int ret = 0;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!limit)
> > +	if (!hwmgr || !limit)
> >   		return -EINVAL;
> >
> >   	if (power_type != PP_PWR_TYPE_SUSTAINED)
> > @@ -965,7 +965,7 @@ static int pp_display_configuration_change(void
> *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	phm_store_dal_configuration_data(hwmgr, display_config);
> > @@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!output)
> > +	if (!hwmgr || !output)
> >   		return -EINVAL;
> >
> >   	return phm_get_dal_power_level(hwmgr, output);
> > @@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
> >   	struct pp_hwmgr *hwmgr = handle;
> >   	int ret = 0;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	phm_get_dal_power_level(hwmgr, &simple_clocks);
> > @@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle,
> enum amd_pp_clock_type type, struc
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (clocks == NULL)
> > @@ -1050,7 +1050,7 @@ static int
> pp_get_clock_by_type_with_latency(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> > +	if (!hwmgr || !clocks)
> >   		return -EINVAL;
> >
> >   	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
> > @@ -1062,7 +1062,7 @@ static int
> pp_get_clock_by_type_with_voltage(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> > +	if (!hwmgr || !clocks)
> >   		return -EINVAL;
> >
> >   	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
> > @@ -1073,7 +1073,7 @@ static int
> pp_set_watermarks_for_clocks_ranges(void *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !clock_ranges)
> > +	if (!hwmgr || !clock_ranges)
> >   		return -EINVAL;
> >
> >   	return phm_set_watermarks_for_clocks_ranges(hwmgr,
> > @@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void
> *handle,
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clock)
> > +	if (!hwmgr || !clock)
> >   		return -EINVAL;
> >
> >   	return phm_display_clock_voltage_request(hwmgr, clock);
> > @@ -1097,7 +1097,7 @@ static int
> pp_get_display_mode_validation_clocks(void *handle,
> >   	struct pp_hwmgr *hwmgr = handle;
> >   	int ret = 0;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled ||!clocks)
> > +	if (!hwmgr || !clocks)
> >   		return -EINVAL;
> >
> >   	clocks->level = PP_DAL_POWERLEVEL_7;
> > @@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void
> *handle)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
> > @@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle,
> bool gate)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return 0;
> >
> >   	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
> > @@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle,
> bool gate)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return;
> >
> >   	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
> > @@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void
> *handle)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
> > @@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void
> *handle)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
> > -	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> > +	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
> > @@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void
> *handle, uint32_t clock)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
> > @@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void
> *handle, uint32_t clock)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL)
> {
> > @@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void
> *handle, uint32_t clock)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
> > @@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void
> *handle, uint32_t count)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	return phm_set_active_display_count(hwmgr, count);
> > @@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle,
> char *buf)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled || !buf)
> > +	if (!hwmgr || !buf)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
> > @@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle,
> uint64_t ppfeature_masks)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
> > @@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle,
> bool acquire)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
> > @@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum
> pp_df_cstate state)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->set_df_cstate)
> > +	if (!hwmgr->hwmgr_func->set_df_cstate)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
> > @@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle,
> uint32_t pstate)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->set_xgmi_pstate)
> > +	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
> > @@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle,
> void **table)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
> || !hwmgr->hwmgr_func->get_gpu_metrics)
> > +	if (!hwmgr->hwmgr_func->get_gpu_metrics)
> >   		return -EOPNOTSUPP;
> >
> >   	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
> > @@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle,
> uint32_t state)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
> >pm.dpm_enabled)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > index 96a3388c2cb7..97c57a6cf314 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > @@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void
> *handle,
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	return smu_get_pp_feature_mask(smu, buf);
> >   }
> >
> > @@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void
> *handle,
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	return smu_set_pp_feature_mask(smu, new_mask);
> >   }
> >
> > @@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled) {
> > -		dev_WARN(smu->adev->dev,
> > -			 "SMU uninitialized but power %s requested
> for %u!\n",
> > -			 gate ? "gate" : "ungate", block_type);
> > -		return -EOPNOTSUPP;
> > -	}
> > -
> >   	switch (block_type) {
> >   	/*
> >   	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
> > @@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct
> smu_context *smu)
> >   	if (!smu->adev->in_suspend)
> >   		return;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return;
> > -
> >   	/* Enable restore flag */
> >   	smu->user_dpm_profile.flags |=
> SMU_DPM_USER_PROFILE_RESTORE;
> >
> > @@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
> >   	struct smu_context *smu = handle;
> >   	struct smu_table_context *smu_table = &smu->smu_table;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu_table->power_play_table && !smu_table-
> >hardcode_pptable)
> >   		return -EINVAL;
> >
> > @@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
> >   	ATOM_COMMON_TABLE_HEADER *header =
> (ATOM_COMMON_TABLE_HEADER *)buf;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (header->usStructureSize != size) {
> >   		dev_err(smu->adev->dev, "pp table size not matched !\n");
> >   		return -EIO;
> > @@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void
> *handle,
> >   	int index = 0;
> >   	int num_of_active_display = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!display_config)
> >   		return -EINVAL;
> >
> > @@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context
> *smu,
> >   {
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	switch (task_id) {
> >   	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
> >   		ret = smu_pre_display_config_changed(smu);
> > @@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void
> *handle,
> >   	long workload;
> >   	uint32_t index;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> >   		return -EINVAL;
> >
> > @@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level
> smu_get_performance_level(void *handle)
> >   	struct smu_context *smu = handle;
> >   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> >   		return -EINVAL;
> >
> > @@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void
> *handle,
> >   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
> >   		return -EINVAL;
> >
> > @@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle,
> uint32_t count)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	return smu_init_display_count(smu, count);
> >   }
> >
> > @@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct
> smu_context *smu,
> >   	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu_dpm_ctx->dpm_level !=
> AMD_DPM_FORCED_LEVEL_MANUAL) {
> >   		dev_dbg(smu->adev->dev, "force clock level is for dpm
> manual mode only.\n");
> >   		return -EINVAL;
> > @@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
> >   		return 0;
> >
> > @@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
> >   {
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
> >   		return 0;
> >
> > @@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct
> smu_context *smu, bool en)
> >   	return ret;
> >   }
> >
> > -int smu_write_watermarks_table(struct smu_context *smu)
> > -{
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> > -	return smu_set_watermarks_table(smu, NULL);
> > -}
> > -
> >   static int smu_set_watermarks_for_clock_ranges(void *handle,
> >   					       struct pp_smu_wm_range_sets
> *clock_ranges)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->disable_watermark)
> >   		return 0;
> >
> > @@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
> >   {
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	/* controlled by firmware */
> >   	if (smu->dc_controlled_by_gpio)
> >   		return 0;
> > @@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle,
> uint32_t speed)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->set_fan_speed_rpm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
> >   	uint32_t limit_type;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	switch(pp_power_type) {
> >   	case PP_PWR_TYPE_SUSTAINED:
> >   		limit_type = SMU_DEFAULT_PPT_LIMIT;
> > @@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle,
> uint32_t limit)
> >   	uint32_t limit_type = limit >> 24;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	limit &= (1<<24)-1;
> >   	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
> >   		if (smu->ppt_funcs->set_power_limit)
> > @@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct
> smu_context *smu, enum smu_clk_type cl
> >   {
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->print_clk_levels)
> >   		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
> >
> > @@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->od_edit_dpm_table) {
> >   		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input,
> size);
> >   	}
> > @@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
> >   	int ret = 0;
> >   	uint32_t *size, size_val;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!data || !size_arg)
> >   		return -EINVAL;
> >
> > @@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void
> *handle, char *buf)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled ||
> > -	    !smu->ppt_funcs->get_power_profile_mode)
> > +	if (!smu->ppt_funcs->get_power_profile_mode)
> >   		return -EOPNOTSUPP;
> >   	if (!buf)
> >   		return -EINVAL;
> > @@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void
> *handle,
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled ||
> > -	    !smu->ppt_funcs->set_power_profile_mode)
> > +	if (!smu->ppt_funcs->set_power_profile_mode)
> >   		return -EOPNOTSUPP;
> >
> >   	return smu_bump_power_profile_mode(smu, param, param_size);
> > @@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void
> *handle, u32 *fan_mode)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->get_fan_control_mode)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void
> *handle, u32 value)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->set_fan_control_mode)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void
> *handle, u32 *speed)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->get_fan_speed_pwm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle,
> u32 speed)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->set_fan_speed_pwm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle,
> uint32_t *speed)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->get_fan_speed_rpm)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void
> *handle, uint32_t clk)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	return smu_set_min_dcef_deep_sleep(smu, clk);
> >   }
> >
> > @@ -2556,9 +2455,6 @@ static int
> smu_get_clock_by_type_with_latency(void *handle,
> >   	enum smu_clk_type clk_type;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
> >   		switch (type) {
> >   		case amd_pp_sys_clock:
> > @@ -2590,9 +2486,6 @@ static int
> smu_display_clock_voltage_request(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->display_clock_voltage_request)
> >   		ret = smu->ppt_funcs->display_clock_voltage_request(smu,
> clock_req);
> >
> > @@ -2606,9 +2499,6 @@ static int
> smu_display_disable_memory_clock_switch(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = -EINVAL;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->display_disable_memory_clock_switch)
> >   		ret = smu->ppt_funcs-
> >display_disable_memory_clock_switch(smu,
> disable_memory_clock_switch);
> >
> > @@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->set_xgmi_pstate)
> >   		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
> >
> > @@ -2722,9 +2609,6 @@ static int
> smu_get_max_sustainable_clocks_by_dc(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
> >   		ret = smu->ppt_funcs-
> >get_max_sustainable_clocks_by_dc(smu, max_clocks);
> >
> > @@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void
> *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->get_uclk_dpm_states)
> >   		ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
> clock_values_in_khz, num_states);
> >
> > @@ -2752,9 +2633,6 @@ static enum amd_pm_state_type
> smu_get_current_power_state(void *handle)
> >   	struct smu_context *smu = handle;
> >   	enum amd_pm_state_type pm_state =
> POWER_STATE_TYPE_DEFAULT;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->get_current_power_state)
> >   		pm_state = smu->ppt_funcs-
> >get_current_power_state(smu);
> >
> > @@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void
> *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->get_dpm_clock_table)
> >   		ret = smu->ppt_funcs->get_dpm_clock_table(smu,
> clock_table);
> >
> > @@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void
> *handle, void **table)
> >   {
> >   	struct smu_context *smu = handle;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (!smu->ppt_funcs->get_gpu_metrics)
> >   		return -EOPNOTSUPP;
> >
> > @@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void
> *handle)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->adev->pm.dpm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->enable_mgpu_fan_boost)
> >   		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > index 39d169440d15..bced761f3f96 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > @@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
> >
> >   bool is_support_sw_smu(struct amdgpu_device *adev);
> >   bool is_support_cclk_dpm(struct amdgpu_device *adev);
> > -int smu_write_watermarks_table(struct smu_context *smu);
> >
> >   int smu_get_dpm_freq_range(struct smu_context *smu, enum
> smu_clk_type clk_type,
> >   			   uint32_t *min, uint32_t *max);
> >

^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs
  2022-02-14  4:04   ` Lazar, Lijo
@ 2022-02-17  2:48     ` Quan, Evan
  2022-02-17  5:00       ` Lazar, Lijo
  0 siblings, 1 reply; 23+ messages in thread
From: Quan, Evan @ 2022-02-17  2:48 UTC (permalink / raw)
  To: Lazar, Lijo, amd-gfx; +Cc: Deucher, Alexander, rui.huang

[AMD Official Use Only]



> -----Original Message-----
> From: Lazar, Lijo <Lijo.Lazar@amd.com>
> Sent: Monday, February 14, 2022 12:04 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>;
> rui.huang@amd.com
> Subject: Re: [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu
> reset APIs
> 
> 
> 
> On 2/11/2022 1:22 PM, Evan Quan wrote:
> > Those gpu reset APIs can be granted when:
> >    - System is up and dpm features are enabled.
> >    - System is resuming and dpm features are not yet enabled.
> >      In that scenario, the PMFW is already alive and can support
> >      those gpu reset functionalities.
> >
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > Change-Id: I8c2f07138921eb53a2bd7fb94f9b3622af0eacf8
> > ---
> >   .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
> >   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 34 +++++++++++++++
> >   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 42
> +++++++++++++++----
> >   .../drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c   |  1 +
> >   .../drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c   | 17 ++++++++
> >   drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h  |  1 +
> >   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 32 +++++++-------
> >   7 files changed, 101 insertions(+), 27 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > index a4c267f15959..892648a4a353 100644
> > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > @@ -409,6 +409,7 @@ struct amd_pm_funcs {
> >   				   struct dpm_clocks *clock_table);
> >   	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t
> *size);
> >   	void (*pm_compute_clocks)(void *handle);
> > +	bool (*is_smc_alive)(void *handle);
> >   };
> >
> >   struct metrics_table_header {
> > diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > index b46ae0063047..5f1d3342f87b 100644
> > --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > @@ -120,12 +120,25 @@ int
> amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
> uint32_t block
> >   	return ret;
> >   }
> >
> > +static bool amdgpu_dpm_is_smc_alive(struct amdgpu_device *adev) {
> > +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > +
> > +	if (!pp_funcs || !pp_funcs->is_smc_alive)
> > +		return false;
> > +
> > +	return pp_funcs->is_smc_alive(adev->powerplay.pp_handle);
> > +}
> > +
> >   int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >   	void *pp_handle = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
> >   		return -ENOENT;
> >
> > @@ -145,6 +158,9 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device
> *adev)
> >   	void *pp_handle = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
> >   		return -ENOENT;
> >
> > @@ -164,6 +180,9 @@ int amdgpu_dpm_set_mp1_state(struct
> amdgpu_device *adev,
> >   	int ret = 0;
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (pp_funcs && pp_funcs->set_mp1_state) {
> >   		mutex_lock(&adev->pm.mutex);
> >
> > @@ -184,6 +203,9 @@ bool amdgpu_dpm_is_baco_supported(struct
> amdgpu_device *adev)
> >   	bool baco_cap;
> >   	int ret = 0;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return false;
> > +
> >   	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
> >   		return false;
> >
> > @@ -203,6 +225,9 @@ int amdgpu_dpm_mode2_reset(struct
> amdgpu_device *adev)
> >   	void *pp_handle = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
> >   		return -ENOENT;
> >
> > @@ -221,6 +246,9 @@ int amdgpu_dpm_baco_reset(struct
> amdgpu_device *adev)
> >   	void *pp_handle = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
> >   		return -ENOENT;
> >
> > @@ -244,6 +272,9 @@ bool
> amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	bool support_mode1_reset = false;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return false;
> > +
> >   	if (is_support_sw_smu(adev)) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		support_mode1_reset =
> smu_mode1_reset_is_support(smu); @@ -258,6
> > +289,9 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
> >   	struct smu_context *smu = adev->powerplay.pp_handle;
> >   	int ret = -EOPNOTSUPP;
> >
> > +	if (!amdgpu_dpm_is_smc_alive(adev))
> > +		return -EOPNOTSUPP;
> > +
> >   	if (is_support_sw_smu(adev)) {
> >   		mutex_lock(&adev->pm.mutex);
> >   		ret = smu_mode1_reset(smu);
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > index bba923cfe08c..4c709f7bcd51 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > @@ -844,9 +844,6 @@ static int pp_dpm_set_mp1_state(void *handle,
> enum pp_mp1_state mp1_state)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!hwmgr->pm_en)
> > -		return 0;
> > -
> >   	if (hwmgr->hwmgr_func->set_mp1_state)
> >   		return hwmgr->hwmgr_func->set_mp1_state(hwmgr,
> mp1_state);
> >
> > @@ -1305,8 +1302,7 @@ static int pp_get_asic_baco_capability(void
> *handle, bool *cap)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
> > -		!hwmgr->hwmgr_func->get_asic_baco_capability)
> > +	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap); @@ -
> 1321,7
> > +1317,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
> > +	if (!hwmgr->hwmgr_func->get_asic_baco_state)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum
> BACO_STATE
> > *)state); @@ -1336,8 +1332,7 @@ static int pp_set_asic_baco_state(void
> *handle, int state)
> >   	if (!hwmgr)
> >   		return -EINVAL;
> >
> > -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
> > -		!hwmgr->hwmgr_func->set_asic_baco_state)
> > +	if (!hwmgr->hwmgr_func->set_asic_baco_state)
> >   		return 0;
> >
> >   	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum
> > BACO_STATE)state); @@ -1379,7 +1374,7 @@ static int
> pp_asic_reset_mode_2(void *handle)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> >
> > -	if (!hwmgr || !hwmgr->pm_en)
> > +	if (!hwmgr)
> >   		return -EINVAL;
> >
> >   	if (hwmgr->hwmgr_func->asic_reset == NULL) { @@ -1517,6
> +1512,34 @@
> > static void pp_pm_compute_clocks(void *handle)
> >   			      NULL);
> >   }
> >
> > +/* MP Apertures */
> > +#define MP1_Public					0x03b00000
> > +#define smnMP1_FIRMWARE_FLAGS				0x3010028
> > +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L
> > +
> > +static bool pp_is_smc_alive(void *handle) {
> > +	struct pp_hwmgr *hwmgr = handle;
> > +	struct amdgpu_device *adev = hwmgr->adev;
> > +	uint32_t mp1_fw_flags;
> > +
> > +	/*
> > +	 * If some ASIC (e.g. smu7/smu8) needs special handling for
> > +	 * checking smc alive, it should have its own implementation
> > +	 * for ->is_smc_alive.
> > +	 */
> > +	if (hwmgr->hwmgr_func->is_smc_alive)
> > +		return hwmgr->hwmgr_func->is_smc_alive(hwmgr);
> > +
> > +	mp1_fw_flags = RREG32_PCIE(MP1_Public |
> > +				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
> > +
> 
> The flags check doesn't tell whether the PMFW is hung or not. It is a
> minimal flag that gets set once the PMFW has booted, so the condition is
> already an implicit prerequisite for calling the API. The driver always
> checks it on boot; if it is not set, the driver aborts SMU init.
> 
> So the better approach is to send the message without any check; the result
> will tell whether the PMFW is really working or not.
> 
> In short, this API is not needed.
[Quan, Evan] It was not designed to cover "PMFW hung". Instead, it was designed to support the early phase of post-silicon bringup.
At that stage, the SMU may not be enabled/up yet. We need to prevent these APIs from being wrongly called.
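
As a rough illustration only (the real wrapper is added elsewhere in this series
and may differ), the amdgpu_dpm.c side guard could simply forward to the new
->is_smc_alive callback and treat a missing callback as "SMU not up yet":

bool amdgpu_dpm_is_smc_alive(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	/* No callback registered: assume the SMU is not up and reject early */
	if (!pp_funcs || !pp_funcs->is_smc_alive)
		return false;

	return pp_funcs->is_smc_alive(adev->powerplay.pp_handle);
}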

BR
Evan
> 
> Thanks,
> Lijo
> 
> > +	if (mp1_fw_flags &
> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
> > +		return true;
> > +
> > +	return false;
> > +}
> > +
> >   static const struct amd_pm_funcs pp_dpm_funcs = {
> >   	.load_firmware = pp_dpm_load_fw,
> >   	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
> @@
> > -1582,4 +1605,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
> >   	.gfx_state_change_set = pp_gfx_state_change_set,
> >   	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
> >   	.pm_compute_clocks = pp_pm_compute_clocks,
> > +	.is_smc_alive = pp_is_smc_alive,
> >   };
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> > b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> > index a1e11037831a..118039b96524 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
> > @@ -5735,6 +5735,7 @@ static const struct pp_hwmgr_func
> smu7_hwmgr_funcs = {
> >   	.get_asic_baco_state = smu7_baco_get_state,
> >   	.set_asic_baco_state = smu7_baco_set_state,
> >   	.power_off_asic = smu7_power_off_asic,
> > +	.is_smc_alive = smu7_is_smc_ram_running,
> >   };
> >
> >   uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff
> > --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> > b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> > index b50fd4a4a3d1..fc4d58329f6d 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
> > @@ -2015,6 +2015,22 @@ static void smu8_dpm_powergate_vce(struct
> pp_hwmgr *hwmgr, bool bgate)
> >   	}
> >   }
> >
> > +#define ixMP1_FIRMWARE_FLAGS
> 	0x3008210
> > +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK
> 	0x00000001L
> > +
> > +static bool smu8_is_smc_running(struct pp_hwmgr *hwmgr) {
> > +	struct amdgpu_device *adev = hwmgr->adev;
> > +	uint32_t mp1_fw_flags;
> > +
> > +	mp1_fw_flags = RREG32_SMC(ixMP1_FIRMWARE_FLAGS);
> > +
> > +	if (mp1_fw_flags &
> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
> > +		return true;
> > +
> > +	return false;
> > +}
> > +
> >   static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
> >   	.backend_init = smu8_hwmgr_backend_init,
> >   	.backend_fini = smu8_hwmgr_backend_fini, @@ -2047,6 +2063,7
> @@
> > static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
> >   	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
> >   	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
> >   	.get_thermal_temperature_range =
> > smu8_get_thermal_temperature_range,
> > +	.is_smc_alive = smu8_is_smc_running,
> >   };
> >
> >   int smu8_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git
> > a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> > b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> > index 4f7f2f455301..790fc387752c 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
> > @@ -364,6 +364,7 @@ struct pp_hwmgr_func {
> >   					bool disable);
> >   	ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
> >   	int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state);
> > +	bool (*is_smc_alive)(struct pp_hwmgr *hwmgr);
> >   };
> >
> >   struct pp_table_func {
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > index 8b8feaf7aa0e..27a453fb4db7 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > @@ -1845,9 +1845,6 @@ static int smu_set_mp1_state(void *handle,
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->pm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs &&
> >   	    smu->ppt_funcs->set_mp1_state)
> >   		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
> @@ -2513,9
> > +2510,6 @@ static int smu_get_baco_capability(void *handle, bool *cap)
> >
> >   	*cap = false;
> >
> > -	if (!smu->pm_enabled)
> > -		return 0;
> > -
> >   	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
> >   		*cap = smu->ppt_funcs->baco_is_support(smu);
> >
> > @@ -2527,9 +2521,6 @@ static int smu_baco_set_state(void *handle, int
> state)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->pm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (state == 0) {
> >   		if (smu->ppt_funcs->baco_exit)
> >   			ret = smu->ppt_funcs->baco_exit(smu); @@ -2551,9
> +2542,6 @@ bool
> > smu_mode1_reset_is_support(struct smu_context *smu)
> >   {
> >   	bool ret = false;
> >
> > -	if (!smu->pm_enabled)
> > -		return false;
> > -
> >   	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
> >   		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
> >
> > @@ -2564,9 +2552,6 @@ int smu_mode1_reset(struct smu_context *smu)
> >   {
> >   	int ret = 0;
> >
> > -	if (!smu->pm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->mode1_reset)
> >   		ret = smu->ppt_funcs->mode1_reset(smu);
> >
> > @@ -2578,9 +2563,6 @@ static int smu_mode2_reset(void *handle)
> >   	struct smu_context *smu = handle;
> >   	int ret = 0;
> >
> > -	if (!smu->pm_enabled)
> > -		return -EOPNOTSUPP;
> > -
> >   	if (smu->ppt_funcs->mode2_reset)
> >   		ret = smu->ppt_funcs->mode2_reset(smu);
> >
> > @@ -2712,6 +2694,19 @@ static int smu_get_prv_buffer_details(void
> *handle, void **addr, size_t *size)
> >   	return 0;
> >   }
> >
> > +static bool smu_is_smc_alive(void *handle) {
> > +	struct smu_context *smu = handle;
> > +
> > +	if (!smu->ppt_funcs->check_fw_status)
> > +		return false;
> > +
> > +	if (!smu->ppt_funcs->check_fw_status(smu))
> > +		return true;
> > +
> > +	return false;
> > +}
> > +
> >   static const struct amd_pm_funcs swsmu_pm_funcs = {
> >   	/* export for sysfs */
> >   	.set_fan_control_mode    = smu_set_fan_control_mode,
> > @@ -2765,6 +2760,7 @@ static const struct amd_pm_funcs
> swsmu_pm_funcs = {
> >   	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
> >   	.get_dpm_clock_table              = smu_get_dpm_clock_table,
> >   	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
> > +	.is_smc_alive = smu_is_smc_alive,
> >   };
> >
> >   int smu_wait_for_event(struct smu_context *smu, enum
> smu_event_type
> > event,
> >

^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset
  2022-02-11 13:21   ` Lazar, Lijo
@ 2022-02-17  2:53     ` Quan, Evan
  0 siblings, 0 replies; 23+ messages in thread
From: Quan, Evan @ 2022-02-17  2:53 UTC (permalink / raw)
  To: Lazar, Lijo, amd-gfx; +Cc: Deucher, Alexander, rui.huang

[AMD Official Use Only]



> -----Original Message-----
> From: Lazar, Lijo <Lijo.Lazar@amd.com>
> Sent: Friday, February 11, 2022 9:22 PM
> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>;
> rui.huang@amd.com
> Subject: Re: [PATCH 12/12] drm/amd/pm: revise the implementations for
> asic reset
> 
> 
> 
> On 2/11/2022 1:22 PM, Evan Quan wrote:
> > Instead of having an interface for every reset method, we replace them
> > with a new interface which can support all reset methods.
> >
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > Change-Id: I4c8a7121dd65c2671085673dd7c13cf7e4286f3d
> > ---
> >   drivers/gpu/drm/amd/amdgpu/aldebaran.c        |   2 +-
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |   4 +-
> >   drivers/gpu/drm/amd/amdgpu/cik.c              |   4 +-
> >   drivers/gpu/drm/amd/amdgpu/nv.c               |  13 +-
> >   drivers/gpu/drm/amd/amdgpu/soc15.c            |  12 +-
> >   drivers/gpu/drm/amd/amdgpu/vi.c               |   6 +-
> >   .../gpu/drm/amd/include/kgd_pp_interface.h    |   7 +-
> >   drivers/gpu/drm/amd/pm/amdgpu_dpm.c           |  89 ++-----------
> >   drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  13 +-
> >   .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  |  86 ++++++++----
> >   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 126 +++++++++++-
> ------
> >   drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   3 -
> >   12 files changed, 180 insertions(+), 185 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> > b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> > index a545df4efce1..22b787de313a 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
> > @@ -128,7 +128,7 @@ static int aldebaran_mode2_reset(struct
> amdgpu_device *adev)
> >   {
> >   	/* disable BM */
> >   	pci_clear_master(adev->pdev);
> > -	adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
> > +	adev->asic_reset_res = amdgpu_dpm_asic_reset(adev,
> > +AMD_RESET_METHOD_MODE2);
> >   	return adev->asic_reset_res;
> >   }
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > index 7931132ce6e3..b19bfdf81500 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > @@ -4504,9 +4504,9 @@ int amdgpu_device_mode1_reset(struct
> > amdgpu_device *adev)
> >
> >           amdgpu_device_cache_pci_state(adev->pdev);
> >
> > -        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
> > +        if (amdgpu_dpm_is_asic_reset_supported(adev,
> > + AMD_RESET_METHOD_MODE1)) {
> >                   dev_info(adev->dev, "GPU smu mode1 reset\n");
> > -                ret = amdgpu_dpm_mode1_reset(adev);
> > +                ret = amdgpu_dpm_asic_reset(adev,
> > + AMD_RESET_METHOD_MODE1);
> >           } else {
> >                   dev_info(adev->dev, "GPU psp mode1 reset\n");
> >                   ret = psp_gpu_reset(adev); diff --git
> > a/drivers/gpu/drm/amd/amdgpu/cik.c
> b/drivers/gpu/drm/amd/amdgpu/cik.c
> > index f10ce740a29c..786975716eb9 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/cik.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/cik.c
> > @@ -1380,7 +1380,7 @@ static bool cik_asic_supports_baco(struct
> amdgpu_device *adev)
> >   	switch (adev->asic_type) {
> >   	case CHIP_BONAIRE:
> >   	case CHIP_HAWAII:
> > -		return amdgpu_dpm_is_baco_supported(adev);
> > +		return amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   	default:
> >   		return false;
> >   	}
> > @@ -1434,7 +1434,7 @@ static int cik_asic_reset(struct amdgpu_device
> > *adev)
> >
> >   	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
> >   		dev_info(adev->dev, "BACO reset\n");
> > -		r = amdgpu_dpm_baco_reset(adev);
> > +		r = amdgpu_dpm_asic_reset(adev,
> AMD_RESET_METHOD_BACO);
> >   	} else {
> >   		dev_info(adev->dev, "PCI CONFIG reset\n");
> >   		r = cik_asic_pci_config_reset(adev); diff --git
> > a/drivers/gpu/drm/amd/amdgpu/nv.c
> b/drivers/gpu/drm/amd/amdgpu/nv.c
> > index 494e17f65fc3..2e590008d3ee 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/nv.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/nv.c
> > @@ -414,7 +414,7 @@ static int nv_asic_mode2_reset(struct
> > amdgpu_device *adev)
> >
> >   	amdgpu_device_cache_pci_state(adev->pdev);
> >
> > -	ret = amdgpu_dpm_mode2_reset(adev);
> > +	ret = amdgpu_dpm_asic_reset(adev,
> AMD_RESET_METHOD_MODE2);
> >   	if (ret)
> >   		dev_err(adev->dev, "GPU mode2 reset failed\n");
> >
> > @@ -458,7 +458,7 @@ nv_asic_reset_method(struct amdgpu_device
> *adev)
> >   	case IP_VERSION(11, 0, 13):
> >   		return AMD_RESET_METHOD_MODE1;
> >   	default:
> > -		if (amdgpu_dpm_is_baco_supported(adev))
> > +		if (amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO))
> >   			return AMD_RESET_METHOD_BACO;
> >   		else
> >   			return AMD_RESET_METHOD_MODE1;
> > @@ -476,7 +476,7 @@ static int nv_asic_reset(struct amdgpu_device
> *adev)
> >   		break;
> >   	case AMD_RESET_METHOD_BACO:
> >   		dev_info(adev->dev, "BACO reset\n");
> > -		ret = amdgpu_dpm_baco_reset(adev);
> > +		ret = amdgpu_dpm_asic_reset(adev,
> AMD_RESET_METHOD_BACO);
> >   		break;
> >   	case AMD_RESET_METHOD_MODE2:
> >   		dev_info(adev->dev, "MODE2 reset\n"); @@ -641,6 +641,11
> @@ static
> > int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
> >   	return 0;
> >   }
> >
> > +static bool nv_asic_supports_baco(struct amdgpu_device *adev) {
> > +	return amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO); }
> > +
> >   static const struct amdgpu_asic_funcs nv_asic_funcs =
> >   {
> >   	.read_disabled_bios = &nv_read_disabled_bios, @@ -657,7 +662,7
> @@
> > static const struct amdgpu_asic_funcs nv_asic_funcs =
> >   	.need_full_reset = &nv_need_full_reset,
> >   	.need_reset_on_init = &nv_need_reset_on_init,
> >   	.get_pcie_replay_count = &nv_get_pcie_replay_count,
> > -	.supports_baco = &amdgpu_dpm_is_baco_supported,
> > +	.supports_baco = &nv_asic_supports_baco,
> >   	.pre_asic_init = &nv_pre_asic_init,
> >   	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
> >   	.query_video_codecs = &nv_query_video_codecs, diff --git
> > a/drivers/gpu/drm/amd/amdgpu/soc15.c
> > b/drivers/gpu/drm/amd/amdgpu/soc15.c
> > index a216e625c89c..15ee56406bc1 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
> > @@ -508,7 +508,7 @@ static int soc15_asic_baco_reset(struct
> amdgpu_device *adev)
> >   	if (ras && adev->ras_enabled)
> >   		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
> >
> > -	ret = amdgpu_dpm_baco_reset(adev);
> > +	ret = amdgpu_dpm_asic_reset(adev, AMD_RESET_METHOD_BACO);
> >   	if (ret)
> >   		return ret;
> >
> > @@ -553,7 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device
> *adev)
> >   	case IP_VERSION(11, 0, 2):
> >   		if (adev->asic_type == CHIP_VEGA20) {
> >   			if (adev->psp.sos.fw_version >= 0x80067)
> > -				baco_reset =
> amdgpu_dpm_is_baco_supported(adev);
> > +				baco_reset =
> amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   			/*
> >   			 * 1. PMFW version > 0x284300: all cases use baco
> >   			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS
> use baco @@
> > -562,7 +562,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
> >   			    adev->pm.fw_version <= 0x283400)
> >   				baco_reset = false;
> >   		} else {
> > -			baco_reset =
> amdgpu_dpm_is_baco_supported(adev);
> > +			baco_reset =
> amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   		}
> >   		break;
> >   	case IP_VERSION(13, 0, 2):
> > @@ -599,7 +599,7 @@ static int soc15_asic_reset(struct amdgpu_device
> *adev)
> >   		return soc15_asic_baco_reset(adev);
> >   	case AMD_RESET_METHOD_MODE2:
> >   		dev_info(adev->dev, "MODE2 reset\n");
> > -		return amdgpu_dpm_mode2_reset(adev);
> > +		return amdgpu_dpm_asic_reset(adev,
> AMD_RESET_METHOD_MODE2);
> >   	default:
> >   		dev_info(adev->dev, "MODE1 reset\n");
> >   		return amdgpu_device_mode1_reset(adev); @@ -613,10
> +613,10 @@
> > static bool soc15_supports_baco(struct amdgpu_device *adev)
> >   	case IP_VERSION(11, 0, 2):
> >   		if (adev->asic_type == CHIP_VEGA20) {
> >   			if (adev->psp.sos.fw_version >= 0x80067)
> > -				return
> amdgpu_dpm_is_baco_supported(adev);
> > +				return
> amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   			return false;
> >   		} else {
> > -			return amdgpu_dpm_is_baco_supported(adev);
> > +			return amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   		}
> >   		break;
> >   	default:
> > diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c
> > b/drivers/gpu/drm/amd/amdgpu/vi.c index 6645ebbd2696..de510de5e62a
> > 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/vi.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
> > @@ -904,7 +904,7 @@ static bool vi_asic_supports_baco(struct
> amdgpu_device *adev)
> >   	case CHIP_POLARIS11:
> >   	case CHIP_POLARIS12:
> >   	case CHIP_TOPAZ:
> > -		return amdgpu_dpm_is_baco_supported(adev);
> > +		return amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   	default:
> >   		return false;
> >   	}
> > @@ -930,7 +930,7 @@ vi_asic_reset_method(struct amdgpu_device
> *adev)
> >   	case CHIP_POLARIS11:
> >   	case CHIP_POLARIS12:
> >   	case CHIP_TOPAZ:
> > -		baco_reset = amdgpu_dpm_is_baco_supported(adev);
> > +		baco_reset = amdgpu_dpm_is_asic_reset_supported(adev,
> > +AMD_RESET_METHOD_BACO);
> >   		break;
> >   	default:
> >   		baco_reset = false;
> > @@ -962,7 +962,7 @@ static int vi_asic_reset(struct amdgpu_device
> > *adev)
> >
> >   	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
> >   		dev_info(adev->dev, "BACO reset\n");
> > -		r = amdgpu_dpm_baco_reset(adev);
> > +		r = amdgpu_dpm_asic_reset(adev,
> AMD_RESET_METHOD_BACO);
> >   	} else {
> >   		dev_info(adev->dev, "PCI CONFIG reset\n");
> >   		r = vi_asic_pci_config_reset(adev); diff --git
> > a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > index 892648a4a353..8d9c32e70532 100644
> > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > @@ -300,6 +300,7 @@ struct amd_pp_clocks;
> >   struct pp_smu_wm_range_sets;
> >   struct pp_smu_nv_clock_table;
> >   struct dpm_clocks;
> > +enum amd_reset_method;
> >
> >   struct amd_pm_funcs {
> >   /* export for dpm on ci and si */
> > @@ -387,12 +388,10 @@ struct amd_pm_funcs {
> >   	int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
> >   	int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
> >   	int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
> > -	int (*get_asic_baco_capability)(void *handle, bool *cap);
> >   	int (*get_asic_baco_state)(void *handle, int *state);
> >   	int (*set_asic_baco_state)(void *handle, int state);
> >   	int (*get_ppfeature_status)(void *handle, char *buf);
> >   	int (*set_ppfeature_status)(void *handle, uint64_t
> ppfeature_masks);
> > -	int (*asic_reset_mode_2)(void *handle);
> >   	int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
> >   	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
> >   	ssize_t (*get_gpu_metrics)(void *handle, void **table); @@ -410,6
> > +409,10 @@ struct amd_pm_funcs {
> >   	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t
> *size);
> >   	void (*pm_compute_clocks)(void *handle);
> >   	bool (*is_smc_alive)(void *handle);
> > +	int (*is_asic_reset_supported)(void *handle,
> > +				       enum amd_reset_method
> reset_method);
> > +	int (*asic_reset)(void *handle,
> > +			  enum amd_reset_method reset_method);
> >   };
> >
> >   struct metrics_table_header {
> > diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > index f237dd3a3f66..b72945f6a338 100644
> > --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
> > @@ -196,107 +196,42 @@ int amdgpu_dpm_set_mp1_state(struct
> amdgpu_device *adev,
> >   	return ret;
> >   }
> >
> > -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
> > +int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
> > +				       enum amd_reset_method reset_method)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	void *pp_handle = adev->powerplay.pp_handle;
> > -	bool baco_cap;
> > -	int ret = 0;
> > +	int reset_supported = false;
> >
> >   	if (!amdgpu_dpm_is_smc_alive(adev))
> >   		return false;
> >
> > -	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
> > +	if (!pp_funcs || !pp_funcs->is_asic_reset_supported)
> >   		return false;
> >
> >   	mutex_lock(&adev->pm.mutex);
> > -
> > -	ret = pp_funcs->get_asic_baco_capability(pp_handle,
> > -						 &baco_cap);
> > -
> > +	reset_supported = pp_funcs->is_asic_reset_supported(adev-
> >powerplay.pp_handle,
> > +							    reset_method);
> >   	mutex_unlock(&adev->pm.mutex);
> >
> > -	return ret ? false : baco_cap;
> > +	return reset_supported;
> >   }
> >
> > -int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
> > +int amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
> > +			  enum amd_reset_method reset_method)
> >   {
> >   	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	void *pp_handle = adev->powerplay.pp_handle;
> >   	int ret = 0;
> >
> >   	if (!amdgpu_dpm_is_smc_alive(adev))
> >   		return -EOPNOTSUPP;
> >
> > -	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
> > -		return -ENOENT;
> > -
> > -	mutex_lock(&adev->pm.mutex);
> > -
> > -	ret = pp_funcs->asic_reset_mode_2(pp_handle);
> > -
> > -	mutex_unlock(&adev->pm.mutex);
> > -
> > -	return ret;
> > -}
> > -
> > -int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) -{
> > -	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> > -	void *pp_handle = adev->powerplay.pp_handle;
> > -	int ret = 0;
> > -
> > -	if (!amdgpu_dpm_is_smc_alive(adev))
> > +	if (!pp_funcs || !pp_funcs->asic_reset)
> >   		return -EOPNOTSUPP;
> >
> > -	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
> > -		return -ENOENT;
> > -
> >   	mutex_lock(&adev->pm.mutex);
> > -
> > -	/* enter BACO state */
> > -	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
> > -	if (ret)
> > -		goto out;
> > -
> > -	/* exit BACO state */
> > -	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
> > -
> > -out:
> > +	ret = pp_funcs->asic_reset(adev->powerplay.pp_handle,
> > +				   reset_method);
> >   	mutex_unlock(&adev->pm.mutex);
> > -	return ret;
> > -}
> > -
> > -bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device
> *adev)
> > -{
> > -	struct smu_context *smu = adev->powerplay.pp_handle;
> > -	bool support_mode1_reset = false;
> > -
> > -	if (!amdgpu_dpm_is_smc_alive(adev))
> > -		return false;
> > -
> > -	if (is_support_sw_smu(adev)) {
> > -		mutex_lock(&adev->pm.mutex);
> > -		support_mode1_reset =
> smu_mode1_reset_is_support(smu);
> > -		mutex_unlock(&adev->pm.mutex);
> > -	}
> > -
> > -	return support_mode1_reset;
> > -}
> > -
> > -int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) -{
> > -	struct smu_context *smu = adev->powerplay.pp_handle;
> > -	int ret = -EOPNOTSUPP;
> > -
> > -	if (!amdgpu_dpm_is_smc_alive(adev))
> > -		return -EOPNOTSUPP;
> > -
> > -	if (is_support_sw_smu(adev)) {
> > -		mutex_lock(&adev->pm.mutex);
> > -		ret = smu_mode1_reset(smu);
> > -		mutex_unlock(&adev->pm.mutex);
> > -	}
> >
> >   	return ret;
> >   }
> > diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > index 49488aebd350..bda8b8149497 100644
> > --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
> > @@ -374,15 +374,6 @@ int amdgpu_dpm_switch_power_profile(struct
> amdgpu_device *adev,
> >   				    enum PP_SMC_POWER_PROFILE type,
> >   				    bool en);
> >
> > -int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
> > -
> > -int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
> > -
> > -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
> > -
> > -bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device
> *adev);
> > -int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
> > -
> >   int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
> >   			     enum pp_mp1_state mp1_state);
> >
> > @@ -542,4 +533,8 @@ enum pp_smu_status
> amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
> >   						  unsigned int *num_states);
> >   int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
> >   				   struct dpm_clocks *clock_table);
> > +int amdgpu_dpm_is_asic_reset_supported(struct amdgpu_device *adev,
> > +				       enum amd_reset_method
> reset_method); int
> > +amdgpu_dpm_asic_reset(struct amdgpu_device *adev,
> > +			  enum amd_reset_method reset_method);
> >   #endif
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > index 81ec5464b679..3edc05296e01 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> > @@ -1177,20 +1177,6 @@ static int pp_set_active_display_count(void
> *handle, uint32_t count)
> >   	return phm_set_active_display_count(hwmgr, count);
> >   }
> >
> > -static int pp_get_asic_baco_capability(void *handle, bool *cap) -{
> > -	struct pp_hwmgr *hwmgr = handle;
> > -
> > -	*cap = false;
> > -
> > -	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
> > -		return 0;
> > -
> > -	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
> > -
> > -	return 0;
> > -}
> > -
> >   static int pp_get_asic_baco_state(void *handle, int *state)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> > @@ -1242,18 +1228,6 @@ static int pp_set_ppfeature_status(void *handle,
> uint64_t ppfeature_masks)
> >   	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr,
> ppfeature_masks);
> >   }
> >
> > -static int pp_asic_reset_mode_2(void *handle) -{
> > -	struct pp_hwmgr *hwmgr = handle;
> > -
> > -	if (hwmgr->hwmgr_func->asic_reset == NULL) {
> > -		pr_info_ratelimited("%s was not implemented.\n",
> __func__);
> > -		return -EINVAL;
> > -	}
> > -
> > -	return hwmgr->hwmgr_func->asic_reset(hwmgr,
> SMU_ASIC_RESET_MODE_2);
> > -}
> > -
> >   static int pp_smu_i2c_bus_access(void *handle, bool acquire)
> >   {
> >   	struct pp_hwmgr *hwmgr = handle;
> > @@ -1394,6 +1368,62 @@ static bool pp_is_smc_alive(void *handle)
> >   	return false;
> >   }
> >
> > +static int pp_is_asic_reset_supported(void *handle,
> > +				       enum amd_reset_method reset_method)
> {
> > +	struct pp_hwmgr *hwmgr = handle;
> > +	bool reset_supported = false;
> > +
> > +	switch (reset_method) {
> > +	case AMD_RESET_METHOD_BACO:
> > +		if (hwmgr->hwmgr_func->get_asic_baco_capability)
> > +			hwmgr->hwmgr_func-
> >get_asic_baco_capability(hwmgr,
> > +
> &reset_supported);
> > +		break;
> > +	case AMD_RESET_METHOD_MODE1:
> > +	case AMD_RESET_METHOD_MODE2:
> > +	default:
> > +		break;
> > +	}
> > +
> > +	return reset_supported;
> > +}
> > +
> > +static int pp_asic_reset(void *handle,
> > +			 enum amd_reset_method reset_method) {
> > +	struct pp_hwmgr *hwmgr = handle;
> > +	int ret = 0;
> > +
> > +	switch (reset_method) {
> > +	case AMD_RESET_METHOD_MODE1:
> > +		return -EOPNOTSUPP;
> > +	case AMD_RESET_METHOD_MODE2:
> > +		if (!hwmgr->hwmgr_func->asic_reset)
> > +			return -EOPNOTSUPP;
> > +
> > +		ret = hwmgr->hwmgr_func->asic_reset(hwmgr,
> > +
> SMU_ASIC_RESET_MODE_2);
> > +		break;
> > +	case AMD_RESET_METHOD_BACO:
> > +		if (!hwmgr->hwmgr_func->set_asic_baco_state)
> > +			return -EOPNOTSUPP;
> > +
> > +		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
> > +							     BACO_STATE_IN);
> > +		if (ret)
> > +			return ret;
> > +
> > +		ret = hwmgr->hwmgr_func->set_asic_baco_state(hwmgr,
> > +
> BACO_STATE_OUT);
> > +		break;
> > +	default:
> > +		return -EINVAL;
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> >   static const struct amd_pm_funcs pp_dpm_funcs = {
> >   	.load_firmware = pp_dpm_load_fw,
> >   	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
> @@
> > -1446,12 +1476,10 @@ static const struct amd_pm_funcs pp_dpm_funcs =
> {
> >   	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
> >   	.set_hard_min_dcefclk_by_freq =
> pp_set_hard_min_dcefclk_by_freq,
> >   	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
> > -	.get_asic_baco_capability = pp_get_asic_baco_capability,
> >   	.get_asic_baco_state = pp_get_asic_baco_state,
> >   	.set_asic_baco_state = pp_set_asic_baco_state,
> >   	.get_ppfeature_status = pp_get_ppfeature_status,
> >   	.set_ppfeature_status = pp_set_ppfeature_status,
> > -	.asic_reset_mode_2 = pp_asic_reset_mode_2,
> >   	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
> >   	.set_df_cstate = pp_set_df_cstate,
> >   	.set_xgmi_pstate = pp_set_xgmi_pstate, @@ -1460,4 +1488,6 @@
> static
> > const struct amd_pm_funcs pp_dpm_funcs = {
> >   	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
> >   	.pm_compute_clocks = pp_pm_compute_clocks,
> >   	.is_smc_alive = pp_is_smc_alive,
> > +	.is_asic_reset_supported = pp_is_asic_reset_supported,
> > +	.asic_reset = pp_asic_reset,
> >   };
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > index 3773e95a18bf..bab5ddc667f9 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> > @@ -2503,18 +2503,6 @@ static int smu_set_xgmi_pstate(void *handle,
> >   	return ret;
> >   }
> >
> > -static int smu_get_baco_capability(void *handle, bool *cap) -{
> > -	struct smu_context *smu = handle;
> > -
> > -	*cap = false;
> > -
> > -	if (smu->ppt_funcs->baco_is_support)
> > -		*cap = smu->ppt_funcs->baco_is_support(smu);
> > -
> > -	return 0;
> > -}
> > -
> >   static int smu_baco_set_state(void *handle, int state)
> >   {
> >   	struct smu_context *smu = handle;
> > @@ -2537,40 +2525,6 @@ static int smu_baco_set_state(void *handle, int
> state)
> >   	return ret;
> >   }
> >
> > -bool smu_mode1_reset_is_support(struct smu_context *smu) -{
> > -	bool ret = false;
> > -
> > -	if (smu->ppt_funcs->mode1_reset_is_support)
> > -		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
> > -
> > -	return ret;
> > -}
> > -
> > -int smu_mode1_reset(struct smu_context *smu) -{
> > -	int ret = 0;
> > -
> > -	if (smu->ppt_funcs->mode1_reset)
> > -		ret = smu->ppt_funcs->mode1_reset(smu);
> > -
> > -	return ret;
> > -}
> > -
> > -static int smu_mode2_reset(void *handle) -{
> > -	struct smu_context *smu = handle;
> > -	int ret = 0;
> > -
> > -	if (smu->ppt_funcs->mode2_reset)
> > -		ret = smu->ppt_funcs->mode2_reset(smu);
> > -
> > -	if (ret)
> > -		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
> > -
> > -	return ret;
> > -}
> > -
> >   static int smu_get_max_sustainable_clocks_by_dc(void *handle,
> >   						struct
> pp_smu_nv_clock_table *max_clocks)
> >   {
> > @@ -2705,6 +2659,82 @@ static bool smu_is_smc_alive(void *handle)
> >   	return false;
> >   }
> >
> > +static int smu_is_asic_reset_supported(void *handle,
> > +				       enum amd_reset_method reset_method)
> {
> > +	struct smu_context *smu = handle;
> > +	struct amdgpu_device *adev = smu->adev;
> > +	int reset_supported = false;
> > +
> > +	switch (reset_method) {
> > +	case AMD_RESET_METHOD_MODE1:
> > +		if (smu->ppt_funcs->mode1_reset_is_support)
> > +			reset_supported = smu->ppt_funcs-
> >mode1_reset_is_support(smu);
> > +		break;
> > +	case AMD_RESET_METHOD_MODE2:
> > +		switch (adev->ip_versions[MP1_HWIP][0]) {
> > +		case IP_VERSION(11, 5, 0):
> > +		case IP_VERSION(12, 0, 0):
> > +		case IP_VERSION(12, 0, 1):
> > +		case IP_VERSION(13, 0, 2):
> > +		case IP_VERSION(13, 0, 1):
> > +		case IP_VERSION(13, 0, 3):
> > +			reset_supported = true;
> > +			break;
> 
> Patch 2 drops mode2_reset_is_support(). What about changing to
> is_reset_supported() and avoiding other checks here?
> 
> 	return smu->ppt_funcs->is_reset_supported(smu, reset_method);
[Quan, Evan] Sounds good to me. Let me consider how to do this.
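
For reference, a rough sketch of that direction (purely illustrative: it assumes
a single per-ASIC ->is_reset_supported callback replacing the per-method checks
here, which is not what the current patch implements):

static int smu_is_asic_reset_supported(void *handle,
				       enum amd_reset_method reset_method)
{
	struct smu_context *smu = handle;

	/* Let each ASIC's ppt_funcs decide, instead of switching on
	 * reset_method and MP1 IP version in the common code.
	 */
	if (!smu->ppt_funcs || !smu->ppt_funcs->is_reset_supported)
		return false;

	return smu->ppt_funcs->is_reset_supported(smu, reset_method);
}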

BR
Evan
> 
> Thanks,
> Lijo
> 
> > +		default:
> > +			break;
> > +		}
> > +		break;
> > +	case AMD_RESET_METHOD_BACO:
> > +		if (smu->ppt_funcs->baco_is_support)
> > +			reset_supported = smu->ppt_funcs-
> >baco_is_support(smu);
> > +		break;
> > +	default:
> > +		break;
> > +	}
> > +
> > +	return reset_supported;
> > +}
> > +
> > +static int smu_asic_reset(void *handle,
> > +			  enum amd_reset_method reset_method) {
> > +	struct smu_context *smu = handle;
> > +	int ret = 0;
> > +
> > +	switch (reset_method) {
> > +	case AMD_RESET_METHOD_MODE1:
> > +		if (!smu->ppt_funcs->mode1_reset)
> > +			return -EOPNOTSUPP;
> > +
> > +		ret = smu->ppt_funcs->mode1_reset(smu);
> > +		break;
> > +	case AMD_RESET_METHOD_MODE2:
> > +		if (!smu->ppt_funcs->mode2_reset)
> > +			return -EOPNOTSUPP;
> > +
> > +		ret = smu->ppt_funcs->mode2_reset(smu);
> > +		if (ret)
> > +			dev_err(smu->adev->dev, "Mode2 reset failed!\n");
> > +		break;
> > +	case AMD_RESET_METHOD_BACO:
> > +		if (!smu->ppt_funcs->baco_enter ||
> > +		    !smu->ppt_funcs->baco_exit)
> > +			return -EOPNOTSUPP;
> > +
> > +		ret = smu->ppt_funcs->baco_enter(smu);
> > +		if (ret)
> > +			return ret;
> > +
> > +		ret = smu->ppt_funcs->baco_exit(smu);
> > +		break;
> > +	default:
> > +		return -EINVAL;
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> >   static const struct amd_pm_funcs swsmu_pm_funcs = {
> >   	/* export for sysfs */
> >   	.set_fan_control_mode    = smu_set_fan_control_mode,
> > @@ -2744,11 +2774,9 @@ static const struct amd_pm_funcs
> swsmu_pm_funcs = {
> >   	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
> >   	.set_active_display_count         = smu_set_display_count,
> >   	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
> > -	.get_asic_baco_capability         = smu_get_baco_capability,
> >   	.set_asic_baco_state              = smu_baco_set_state,
> >   	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
> >   	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
> > -	.asic_reset_mode_2                = smu_mode2_reset,
> >   	.set_df_cstate                    = smu_set_df_cstate,
> >   	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
> >   	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
> > @@ -2759,6 +2787,8 @@ static const struct amd_pm_funcs
> swsmu_pm_funcs = {
> >   	.get_dpm_clock_table              = smu_get_dpm_clock_table,
> >   	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
> >   	.is_smc_alive = smu_is_smc_alive,
> > +	.is_asic_reset_supported = smu_is_asic_reset_supported,
> > +	.asic_reset              = smu_asic_reset,
> >   };
> >
> >   int smu_wait_for_event(struct smu_context *smu, enum
> smu_event_type
> > event, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > index bced761f3f96..ce9cd0522a40 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> > @@ -1392,9 +1392,6 @@ int smu_get_power_limit(void *handle,
> >   			enum pp_power_limit_level pp_limit_level,
> >   			enum pp_power_type pp_power_type);
> >
> > -bool smu_mode1_reset_is_support(struct smu_context *smu); -int
> > smu_mode1_reset(struct smu_context *smu);
> > -
> >   extern const struct amd_ip_funcs smu_ip_funcs;
> >
> >   bool is_support_sw_smu(struct amdgpu_device *adev);
> >

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c
  2022-02-17  2:35     ` Quan, Evan
@ 2022-02-17  4:55       ` Lazar, Lijo
  0 siblings, 0 replies; 23+ messages in thread
From: Lazar, Lijo @ 2022-02-17  4:55 UTC (permalink / raw)
  To: Quan, Evan, amd-gfx; +Cc: Deucher, Alexander, rui.huang



On 2/17/2022 8:05 AM, Quan, Evan wrote:
> [AMD Official Use Only]
> 
> 
> 
>> -----Original Message-----
>> From: Lazar, Lijo <Lijo.Lazar@amd.com>
>> Sent: Friday, February 11, 2022 9:40 PM
>> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
>> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>;
>> rui.huang@amd.com
>> Subject: Re: [PATCH 05/12] drm/amd/pm: move the check for dpm
>> enablement to amdgpu_dpm.c
>>
>>
>>
>> On 2/11/2022 1:22 PM, Evan Quan wrote:
>>> Instead of checking this in every instance(framework), moving that check
>> to
>>> amdgpu_dpm.c is more proper. And that can make code clean and tidy.
>>>
>>> Signed-off-by: Evan Quan <evan.quan@amd.com>
>>> Change-Id: I2f83a3b860e8aa12cc86f119011f520fbe21a301
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   5 +-
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  16 +-
>>>    drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 277
>> ++++++++++++++++--
>>>    drivers/gpu/drm/amd/pm/amdgpu_pm.c            |  25 +-
>>>    drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h       |  12 +-
>>>    .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c    |   4 -
>>>    .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 117 ++++----
>>>    drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 135 +--------
>>>    drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |   1 -
>>>    9 files changed, 352 insertions(+), 240 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> index 2c929fa40379..fff0e6a3882e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> @@ -261,11 +261,14 @@ static int amdgpu_ctx_get_stable_pstate(struct
>> amdgpu_ctx *ctx,
>>>    {
>>>    	struct amdgpu_device *adev = ctx->adev;
>>>    	enum amd_dpm_forced_level current_level;
>>> +	int ret = 0;
>>>
>>>    	if (!ctx)
>>>    		return -EINVAL;
>>>
>>> -	current_level = amdgpu_dpm_get_performance_level(adev);
>>> +	ret = amdgpu_dpm_get_performance_level(adev, &current_level);
>>> +	if (ret)
>>> +		return ret;
>>>
>>>    	switch (current_level) {
>>>    	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> index 9f985bd463be..56144f25b720 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> @@ -813,15 +813,17 @@ int amdgpu_info_ioctl(struct drm_device *dev,
>> void *data, struct drm_file *filp)
>>>    		unsigned i;
>>>    		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
>>>    		struct amd_vce_state *vce_state;
>>> +		int ret = 0;
>>>
>>>    		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
>>> -			vce_state =
>> amdgpu_dpm_get_vce_clock_state(adev, i);
>>> -			if (vce_state) {
>>> -				vce_clk_table.entries[i].sclk = vce_state-
>>> sclk;
>>> -				vce_clk_table.entries[i].mclk = vce_state-
>>> mclk;
>>> -				vce_clk_table.entries[i].eclk = vce_state-
>>> evclk;
>>> -				vce_clk_table.num_valid_entries++;
>>> -			}
>>> +			ret = amdgpu_dpm_get_vce_clock_state(adev, i,
>> vce_state);
>>> +			if (ret)
>>> +				return ret;
>>> +
>>> +			vce_clk_table.entries[i].sclk = vce_state->sclk;
>>> +			vce_clk_table.entries[i].mclk = vce_state->mclk;
>>> +			vce_clk_table.entries[i].eclk = vce_state->evclk;
>>> +			vce_clk_table.num_valid_entries++;
>>>    		}
>>>
>>>    		return copy_to_user(out, &vce_clk_table,
>>> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>> b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> index 1d63f1e8884c..b46ae0063047 100644
>>> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> @@ -41,6 +41,9 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device
>> *adev, bool low)
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return 0;
>>> +
>>>    	if (!pp_funcs->get_sclk)
>>>    		return 0;
>>>
>>> @@ -57,6 +60,9 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device
>> *adev, bool low)
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return 0;
>>> +
>>>    	if (!pp_funcs->get_mclk)
>>>    		return 0;
>>>
>>> @@ -74,6 +80,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct
>> amdgpu_device *adev, uint32_t block
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF :
>> POWER_STATE_ON;
>>>
>>> +	if (!adev->pm.dpm_enabled) {
>>> +		dev_WARN(adev->dev,
>>> +			 "SMU uninitialized but power %s requested
>> for %u!\n",
>>> +			 gate ? "gate" : "ungate", block_type);
>>> +		return -EOPNOTSUPP;
>>> +	}
>>> +
>>>    	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
>>>    		dev_dbg(adev->dev, "IP block%d already in the target %s
>> state!",
>>>    				block_type, gate ? "gate" : "ungate");
>>> @@ -261,6 +274,9 @@ int amdgpu_dpm_switch_power_profile(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (amdgpu_sriov_vf(adev))
>>>    		return 0;
>>>
>>> @@ -280,6 +296,9 @@ int amdgpu_dpm_set_xgmi_pstate(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = pp_funcs->set_xgmi_pstate(adev-
>>> powerplay.pp_handle,
>>> @@ -297,6 +316,9 @@ int amdgpu_dpm_set_df_cstate(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	void *pp_handle = adev->powerplay.pp_handle;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (pp_funcs && pp_funcs->set_df_cstate) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
>>> @@ -311,6 +333,9 @@ int amdgpu_dpm_allow_xgmi_power_down(struct
>> amdgpu_device *adev, bool en)
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (is_support_sw_smu(adev)) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = smu_allow_xgmi_power_down(smu, en);
>>> @@ -327,6 +352,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct
>> amdgpu_device *adev)
>>>    			adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
>>> @@ -344,6 +372,9 @@ int amdgpu_dpm_set_clockgating_by_smu(struct
>> amdgpu_device *adev,
>>>    			adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
>>> @@ -362,6 +393,9 @@ int amdgpu_dpm_smu_i2c_bus_access(struct
>> amdgpu_device *adev,
>>>    			adev->powerplay.pp_funcs;
>>>    	int ret = -EOPNOTSUPP;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>
>> I2C bus access doesn't need DPM to be enabled.
> [Quan, Evan] The "adev->pm.dpm_enabled" flag is a little confusing. It does not actually mean DPM features need to be enabled.
> Instead, it just tells that the SMU ip initialization process has been fully completed and that the API is designed to be supported in that scenario.
> 
> Unless the API is also expected to be supported under the following scenarios, it needs to be guarded by "adev->pm.dpm_enabled":
> - DPM is disabled explicitly (by module parameter "dpm=0")
>    - some APU-related initial setups (smu_dpm_set_vcn/jpeg_enable) fall into this scenario
> - The deinitialization was performed but reinitialization has not yet been kicked off (e.g. resuming from suspend)
>    - the gpu reset related APIs fall into this scenario
> 
> I cannot see any reason to support the I2C bus access API under the two scenarios above.
> So I think the guard "adev->pm.dpm_enabled" is reasonable here.

For I2C usage, only the FW needs to be loaded. RAS uses i2c transfers, and RAS
may be enabled even if dpm=0. The same applies to any FRU access.
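
(A hedged sketch of what that would mean for the i2c wrapper, i.e. keeping the
existing flow without the dpm_enabled gate; the parameter list is assumed from
the surrounding hunk, and whether any lighter-weight check is wanted is left
open:)

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	/* No dpm_enabled check: RAS/FRU i2c only needs the PMFW to be loaded */
	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}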

Thanks,
Lijo

>>
>>>    	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
>>> @@ -398,6 +432,9 @@ int amdgpu_dpm_read_sensor(struct
>> amdgpu_device *adev, enum amd_pp_sensors senso
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = -EINVAL;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!data || !size)
>>>    		return -EINVAL;
>>>
>>> @@ -485,6 +522,9 @@ int amdgpu_dpm_handle_passthrough_sbr(struct
>> amdgpu_device *adev, bool enable)
>>>    {
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>
>> Please double check on this one also.
>>
>>>    	if (is_support_sw_smu(adev)) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = smu_handle_passthrough_sbr(adev-
>>> powerplay.pp_handle,
>>> @@ -500,6 +540,9 @@ int
>> amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev,
>> uint32_t size)
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	ret = smu_send_hbm_bad_pages_num(smu, size);
>>>    	mutex_unlock(&adev->pm.mutex);
>>> @@ -514,6 +557,9 @@ int amdgpu_dpm_get_dpm_freq_range(struct
>> amdgpu_device *adev,
>>>    {
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (type != PP_SCLK)
>>>    		return -EINVAL;
>>>
>>> @@ -538,6 +584,9 @@ int amdgpu_dpm_set_soft_freq_range(struct
>> amdgpu_device *adev,
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (type != PP_SCLK)
>>>    		return -EINVAL;
>>>
>>> @@ -556,14 +605,18 @@ int amdgpu_dpm_set_soft_freq_range(struct
>> amdgpu_device *adev,
>>>
>>>    int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
>>>    {
>>> -	struct smu_context *smu = adev->powerplay.pp_handle;
>>> +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return 0;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>> -	ret = smu_write_watermarks_table(smu);
>>> +	ret = pp_funcs->set_watermarks_for_clock_ranges(adev-
>>> powerplay.pp_handle,
>>> +							NULL);
>>>    	mutex_unlock(&adev->pm.mutex);
>>>
>>>    	return ret;
>>> @@ -576,6 +629,9 @@ int amdgpu_dpm_wait_for_event(struct
>> amdgpu_device *adev,
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>
>> In this case also DPM doesn't need to be enabled.
> [Quan, Evan] This seems to be used by mode2 reset only. Maybe using the guard "amdgpu_dpm_is_smc_alive" implemented in patch 7 is more proper.
>>
>> In general, this patch assumes the DPM interfaces will continue. There was a
>> discussion around getting rid of dpm and moving to an smu component based
>> interface. This patch goes in the opposite direction.
> [Quan, Evan] No, they do not conflict. We can still advance in that direction.
> I just do not want to put those swsmu based interfaces into an intermediate state (some having the "dpm_enabled" guard in amdgpu_dpm.c while others have it in amdgpu_smu.c).
> That was my only consideration.
> 
> BR
> Evan
>>
>> Thanks,
>> Lijo
>>
>>>    	if (!is_support_sw_smu(adev))
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -591,6 +647,9 @@ int amdgpu_dpm_get_status_gfxoff(struct
>> amdgpu_device *adev, uint32_t *value)
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -605,6 +664,9 @@ uint64_t
>> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
>> *adev)
>>>    {
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return 0;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return 0;
>>>
>>> @@ -619,6 +681,9 @@ uint64_t
>> amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device
>> *adev)
>>>    void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
>>>    				 enum gfx_change_state state)
>>>    {
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	if (adev->powerplay.pp_funcs &&
>>>    	    adev->powerplay.pp_funcs->gfx_state_change_set)
>>> @@ -632,27 +697,33 @@ int amdgpu_dpm_get_ecc_info(struct
>> amdgpu_device *adev,
>>>    {
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return -EOPNOTSUPP;
>>>
>>>    	return smu_get_ecc_info(smu, umc_ecc);
>>>    }
>>>
>>> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
>> amdgpu_device *adev,
>>> -						     uint32_t idx)
>>> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
>>> +				   uint32_t idx,
>>> +				   struct amd_vce_state *vstate)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>> -	struct amd_vce_state *vstate = NULL;
>>> +
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>>
>>>    	if (!pp_funcs->get_vce_clock_state)
>>> -		return NULL;
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	vstate = pp_funcs->get_vce_clock_state(adev-
>>> powerplay.pp_handle,
>>>    					       idx);
>>>    	mutex_unlock(&adev->pm.mutex);
>>>
>>> -	return vstate;
>>> +	return 0;
>>>    }
>>>
>>>    void amdgpu_dpm_get_current_power_state(struct amdgpu_device
>> *adev,
>>> @@ -660,6 +731,9 @@ void
>> amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	mutex_lock(&adev->pm.mutex);
>>>
>>>    	if (!pp_funcs->get_current_power_state) {
>>> @@ -679,6 +753,9 @@ void
>> amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
>>>    void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>>>    				enum amd_pm_state_type state)
>>>    {
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	adev->pm.dpm.user_state = state;
>>>    	mutex_unlock(&adev->pm.mutex);
>>> @@ -692,19 +769,22 @@ void amdgpu_dpm_set_power_state(struct
>> amdgpu_device *adev,
>>>    		amdgpu_dpm_compute_clocks(adev);
>>>    }
>>>
>>> -enum amd_dpm_forced_level
>> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
>>> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
>>> +				     enum amd_dpm_forced_level *level)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>> -	enum amd_dpm_forced_level level;
>>> +
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	if (pp_funcs->get_performance_level)
>>> -		level = pp_funcs->get_performance_level(adev-
>>> powerplay.pp_handle);
>>> +		*level = pp_funcs->get_performance_level(adev-
>>> powerplay.pp_handle);
>>>    	else
>>> -		level = adev->pm.dpm.forced_level;
>>> +		*level = adev->pm.dpm.forced_level;
>>>    	mutex_unlock(&adev->pm.mutex);
>>>
>>> -	return level;
>>> +	return 0;
>>>    }
>>>
>>>    int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>>> @@ -717,13 +797,16 @@ int
>> amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>>>
>> 	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
>>>
>> 	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->force_performance_level)
>>>    		return 0;
>>>
>>>    	if (adev->pm.dpm.thermal_active)
>>>    		return -EINVAL;
>>>
>>> -	current_level = amdgpu_dpm_get_performance_level(adev);
>>> +	amdgpu_dpm_get_performance_level(adev, &current_level);
>>>    	if (current_level == level)
>>>    		return 0;
>>>
>>> @@ -783,6 +866,9 @@ int amdgpu_dpm_get_pp_num_states(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_pp_num_states)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -801,6 +887,9 @@ int amdgpu_dpm_dispatch_task(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->dispatch_tasks)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -818,6 +907,9 @@ int amdgpu_dpm_get_pp_table(struct
>> amdgpu_device *adev, char **table)
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_pp_table)
>>>    		return 0;
>>>
>>> @@ -837,6 +929,9 @@ int amdgpu_dpm_set_fine_grain_clk_vol(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_fine_grain_clk_vol)
>>>    		return 0;
>>>
>>> @@ -858,6 +953,9 @@ int amdgpu_dpm_odn_edit_dpm_table(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->odn_edit_dpm_table)
>>>    		return 0;
>>>
>>> @@ -878,6 +976,9 @@ int amdgpu_dpm_print_clock_levels(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->print_clock_levels)
>>>    		return 0;
>>>
>>> @@ -917,6 +1018,9 @@ int amdgpu_dpm_set_ppfeature_status(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_ppfeature_status)
>>>    		return 0;
>>>
>>> @@ -933,6 +1037,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
>> amdgpu_device *adev, char *buf)
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_ppfeature_status)
>>>    		return 0;
>>>
>>> @@ -951,6 +1058,9 @@ int amdgpu_dpm_force_clock_level(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->force_clock_level)
>>>    		return 0;
>>>
>>> @@ -963,27 +1073,33 @@ int amdgpu_dpm_force_clock_level(struct
>> amdgpu_device *adev,
>>>    	return ret;
>>>    }
>>>
>>> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
>>> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev,
>>> +			   uint32_t *value)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>> -	int ret = 0;
>>> +
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>>
>>>    	if (!pp_funcs->get_sclk_od)
>>> -		return 0;
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>> -	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
>>> +	*value = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
>>>    	mutex_unlock(&adev->pm.mutex);
>>>
>>> -	return ret;
>>> +	return 0;
>>>    }
>>>
>>>    int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t
>> value)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (is_support_sw_smu(adev))
>>> -		return 0;
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	if (pp_funcs->set_sclk_od)
>>> @@ -1000,27 +1116,33 @@ int amdgpu_dpm_set_sclk_od(struct
>> amdgpu_device *adev, uint32_t value)
>>>    	return 0;
>>>    }
>>>
>>> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
>>> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev,
>>> +			   uint32_t *value)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>> -	int ret = 0;
>>> +
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>>
>>>    	if (!pp_funcs->get_mclk_od)
>>> -		return 0;
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>> -	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
>>> +	*value = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
>>>    	mutex_unlock(&adev->pm.mutex);
>>>
>>> -	return ret;
>>> +	return 0;
>>>    }
>>>
>>>    int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t
>> value)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (is_support_sw_smu(adev))
>>> -		return 0;
>>> +		return -EOPNOTSUPP;
>>>
>>>    	mutex_lock(&adev->pm.mutex);
>>>    	if (pp_funcs->set_mclk_od)
>>> @@ -1043,6 +1165,9 @@ int
>> amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_power_profile_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1060,6 +1185,9 @@ int
>> amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_power_profile_mode)
>>>    		return 0;
>>>
>>> @@ -1077,6 +1205,9 @@ int amdgpu_dpm_get_gpu_metrics(struct
>> amdgpu_device *adev, void **table)
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_gpu_metrics)
>>>    		return 0;
>>>
>>> @@ -1094,6 +1225,9 @@ int amdgpu_dpm_get_fan_control_mode(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_fan_control_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1111,6 +1245,9 @@ int amdgpu_dpm_set_fan_speed_pwm(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_fan_speed_pwm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1128,6 +1265,9 @@ int amdgpu_dpm_get_fan_speed_pwm(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_fan_speed_pwm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1145,6 +1285,9 @@ int amdgpu_dpm_get_fan_speed_rpm(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_fan_speed_rpm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1162,6 +1305,9 @@ int amdgpu_dpm_set_fan_speed_rpm(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_fan_speed_rpm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1179,6 +1325,9 @@ int amdgpu_dpm_set_fan_control_mode(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_fan_control_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1198,6 +1347,9 @@ int amdgpu_dpm_get_power_limit(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_power_limit)
>>>    		return -ENODATA;
>>>
>>> @@ -1217,6 +1369,9 @@ int amdgpu_dpm_set_power_limit(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_power_limit)
>>>    		return -EINVAL;
>>>
>>> @@ -1232,6 +1387,9 @@ int amdgpu_dpm_is_cclk_dpm_supported(struct
>> amdgpu_device *adev)
>>>    {
>>>    	bool cclk_dpm_supported = false;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return false;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return false;
>>>
>>> @@ -1247,6 +1405,9 @@ int
>> amdgpu_dpm_debugfs_print_current_performance_level(struct
>> amdgpu_device *ade
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->debugfs_print_current_performance_level)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1265,6 +1426,9 @@ int
>> amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_smu_prv_buf_details)
>>>    		return -ENOSYS;
>>>
>>> @@ -1282,6 +1446,9 @@ int amdgpu_dpm_is_overdrive_supported(struct
>> amdgpu_device *adev)
>>>    	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return false;
>>> +
>>>    	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
>>>    	    (is_support_sw_smu(adev) && smu->is_apu) ||
>>>    		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
>>> @@ -1297,6 +1464,9 @@ int amdgpu_dpm_set_pp_table(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_pp_table)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1313,6 +1483,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
>> amdgpu_device *adev)
>>>    {
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return INT_MAX;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return INT_MAX;
>>>
>>> @@ -1321,6 +1494,9 @@ int amdgpu_dpm_get_num_cpu_cores(struct
>> amdgpu_device *adev)
>>>
>>>    void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
>>>    {
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	if (!is_support_sw_smu(adev))
>>>    		return;
>>>
>>> @@ -1333,6 +1509,9 @@ int
>> amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->display_configuration_change)
>>>    		return 0;
>>>
>>> @@ -1351,6 +1530,9 @@ int amdgpu_dpm_get_clock_by_type(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_clock_by_type)
>>>    		return 0;
>>>
>>> @@ -1369,6 +1551,9 @@ int
>> amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device
>> *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_display_mode_validation_clocks)
>>>    		return 0;
>>>
>>> @@ -1387,6 +1572,9 @@ int
>> amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device
>> *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_clock_by_type_with_latency)
>>>    		return 0;
>>>
>>> @@ -1406,6 +1594,9 @@ int
>> amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device
>> *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_clock_by_type_with_voltage)
>>>    		return 0;
>>>
>>> @@ -1424,6 +1615,9 @@ int
>> amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device
>> *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_watermarks_for_clocks_ranges)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1441,6 +1635,9 @@ int
>> amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->display_clock_voltage_request)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1458,6 +1655,9 @@ int amdgpu_dpm_get_current_clocks(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_current_clocks)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1473,6 +1673,9 @@ void
>> amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	if (!pp_funcs->notify_smu_enable_pwe)
>>>    		return;
>>>
>>> @@ -1487,6 +1690,9 @@ int
>> amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_active_display_count)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1504,6 +1710,9 @@ int
>> amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->set_min_deep_sleep_dcefclk)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1520,6 +1729,9 @@ void
>> amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
>>>    		return;
>>>
>>> @@ -1534,6 +1746,9 @@ void
>> amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return;
>>> +
>>>    	if (!pp_funcs->set_hard_min_fclk_by_freq)
>>>    		return;
>>>
>>> @@ -1549,6 +1764,9 @@ int
>> amdgpu_dpm_display_disable_memory_clock_switch(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->display_disable_memory_clock_switch)
>>>    		return 0;
>>>
>>> @@ -1566,6 +1784,9 @@ int
>> amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device
>> *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1584,6 +1805,9 @@ enum pp_smu_status
>> amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_uclk_dpm_states)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -1602,6 +1826,9 @@ int amdgpu_dpm_get_dpm_clock_table(struct
>> amdgpu_device *adev,
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	int ret = 0;
>>>
>>> +	if (!adev->pm.dpm_enabled)
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs->get_dpm_clock_table)
>>>    		return -EOPNOTSUPP;
>>>
>>> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
>> b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
>>> index b0243068212b..84aab3bb9bdc 100644
>>> --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
>>> +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
>>> @@ -273,11 +273,14 @@ static ssize_t
>> amdgpu_get_power_dpm_force_performance_level(struct device *dev,
>>>    		return ret;
>>>    	}
>>>
>>> -	level = amdgpu_dpm_get_performance_level(adev);
>>> +	ret = amdgpu_dpm_get_performance_level(adev, &level);
>>>
>>>    	pm_runtime_mark_last_busy(ddev->dev);
>>>    	pm_runtime_put_autosuspend(ddev->dev);
>>>
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return sysfs_emit(buf, "%s\n",
>>>    			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>> "auto" :
>>>    			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
>>> @@ -1241,11 +1244,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct
>> device *dev,
>>>    		return ret;
>>>    	}
>>>
>>> -	value = amdgpu_dpm_get_sclk_od(adev);
>>> +	ret = amdgpu_dpm_get_sclk_od(adev, &value);
>>>
>>>    	pm_runtime_mark_last_busy(ddev->dev);
>>>    	pm_runtime_put_autosuspend(ddev->dev);
>>>
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return sysfs_emit(buf, "%d\n", value);
>>>    }
>>>
>>> @@ -1275,11 +1281,14 @@ static ssize_t amdgpu_set_pp_sclk_od(struct
>> device *dev,
>>>    		return ret;
>>>    	}
>>>
>>> -	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
>>> +	ret = amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
>>>
>>>    	pm_runtime_mark_last_busy(ddev->dev);
>>>    	pm_runtime_put_autosuspend(ddev->dev);
>>>
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return count;
>>>    }
>>>
>>> @@ -1303,11 +1312,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct
>> device *dev,
>>>    		return ret;
>>>    	}
>>>
>>> -	value = amdgpu_dpm_get_mclk_od(adev);
>>> +	ret = amdgpu_dpm_get_mclk_od(adev, &value);
>>>
>>>    	pm_runtime_mark_last_busy(ddev->dev);
>>>    	pm_runtime_put_autosuspend(ddev->dev);
>>>
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return sysfs_emit(buf, "%d\n", value);
>>>    }
>>>
>>> @@ -1337,11 +1349,14 @@ static ssize_t amdgpu_set_pp_mclk_od(struct
>> device *dev,
>>>    		return ret;
>>>    	}
>>>
>>> -	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
>>> +	ret = amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
>>>
>>>    	pm_runtime_mark_last_busy(ddev->dev);
>>>    	pm_runtime_put_autosuspend(ddev->dev);
>>>
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>    	return count;
>>>    }
>>>
>>> diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
>> b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
>>> index ddfa55b59d02..49488aebd350 100644
>>> --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
>>> +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
>>> @@ -429,12 +429,14 @@ void amdgpu_dpm_gfx_state_change(struct
>> amdgpu_device *adev,
>>>    				 enum gfx_change_state state);
>>>    int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
>>>    			    void *umc_ecc);
>>> -struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct
>> amdgpu_device *adev,
>>> -						     uint32_t idx);
>>> +int amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
>>> +				   uint32_t idx,
>>> +				   struct amd_vce_state *vstate);
>>>    void amdgpu_dpm_get_current_power_state(struct amdgpu_device
>> *adev, enum amd_pm_state_type *state);
>>>    void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
>>>    				enum amd_pm_state_type state);
>>> -enum amd_dpm_forced_level
>> amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
>>> +int amdgpu_dpm_get_performance_level(struct amdgpu_device *adev,
>>> +				     enum amd_dpm_forced_level *level);
>>>    int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
>>>    				       enum amd_dpm_forced_level level);
>>>    int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
>>> @@ -464,9 +466,9 @@ int amdgpu_dpm_get_ppfeature_status(struct
>> amdgpu_device *adev, char *buf);
>>>    int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
>>>    				 enum pp_clock_type type,
>>>    				 uint32_t mask);
>>> -int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
>>> +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev, uint32_t
>> *value);
>>>    int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t
>> value);
>>> -int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
>>> +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev, uint32_t
>> *value);
>>>    int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t
>> value);
>>>    int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device
>> *adev,
>>>    				      char *buf);
>>> diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
>> b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
>>> index 9613c6181c17..59550617cf54 100644
>>> --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
>>> +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
>>> @@ -959,10 +959,6 @@ static int
>> amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
>>>    	int ret;
>>>    	bool equal = false;
>>>
>>> -	/* if dpm init failed */
>>> -	if (!adev->pm.dpm_enabled)
>>> -		return 0;
>>> -
>>>    	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
>>>    		/* add other state override checks here */
>>>    		if ((!adev->pm.dpm.thermal_active) &&
>>> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> index 991ac4adb263..bba923cfe08c 100644
>>> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> @@ -295,7 +295,7 @@ static int pp_set_clockgating_by_smu(void *handle,
>> uint32_t msg_id)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
>>> @@ -335,7 +335,7 @@ static int pp_dpm_force_performance_level(void
>> *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (level == hwmgr->dpm_level)
>>> @@ -353,7 +353,7 @@ static enum amd_dpm_forced_level
>> pp_dpm_get_performance_level(
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	return hwmgr->dpm_level;
>>> @@ -363,7 +363,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool
>> low)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return 0;
>>>
>>>    	if (hwmgr->hwmgr_func->get_sclk == NULL) {
>>> @@ -377,7 +377,7 @@ static uint32_t pp_dpm_get_mclk(void *handle,
>> bool low)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return 0;
>>>
>>>    	if (hwmgr->hwmgr_func->get_mclk == NULL) {
>>> @@ -391,7 +391,7 @@ static void pp_dpm_powergate_vce(void *handle,
>> bool gate)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return;
>>>
>>>    	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
>>> @@ -405,7 +405,7 @@ static void pp_dpm_powergate_uvd(void *handle,
>> bool gate)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return;
>>>
>>>    	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
>>> @@ -420,7 +420,7 @@ static int pp_dpm_dispatch_tasks(void *handle,
>> enum amd_pp_task task_id,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	return hwmgr_handle_task(hwmgr, task_id, user_state);
>>> @@ -432,7 +432,7 @@ static enum amd_pm_state_type
>> pp_dpm_get_current_power_state(void *handle)
>>>    	struct pp_power_state *state;
>>>    	enum amd_pm_state_type pm_type;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !hwmgr->current_ps)
>>> +	if (!hwmgr || !hwmgr->current_ps)
>>>    		return -EINVAL;
>>>
>>>    	state = hwmgr->current_ps;
>>> @@ -462,7 +462,7 @@ static int pp_dpm_set_fan_control_mode(void
>> *handle, uint32_t mode)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
>>> @@ -480,7 +480,7 @@ static int pp_dpm_get_fan_control_mode(void
>> *handle, uint32_t *fan_mode)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
>>> @@ -497,7 +497,7 @@ static int pp_dpm_set_fan_speed_pwm(void
>> *handle, uint32_t speed)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
>>> @@ -513,7 +513,7 @@ static int pp_dpm_get_fan_speed_pwm(void
>> *handle, uint32_t *speed)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
>>> @@ -529,7 +529,7 @@ static int pp_dpm_get_fan_speed_rpm(void
>> *handle, uint32_t *rpm)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
>>> @@ -545,7 +545,7 @@ static int pp_dpm_set_fan_speed_rpm(void
>> *handle, uint32_t rpm)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
>>> @@ -565,7 +565,7 @@ static int pp_dpm_get_pp_num_states(void
>> *handle,
>>>
>>>    	memset(data, 0, sizeof(*data));
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!hwmgr->ps)
>>> +	if (!hwmgr || !hwmgr->ps)
>>>    		return -EINVAL;
>>>
>>>    	data->nums = hwmgr->num_ps;
>>> @@ -597,7 +597,7 @@ static int pp_dpm_get_pp_table(void *handle, char
>> **table)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!hwmgr->soft_pp_table)
>>> +	if (!hwmgr || !hwmgr->soft_pp_table)
>>>    		return -EINVAL;
>>>
>>>    	*table = (char *)hwmgr->soft_pp_table;
>>> @@ -625,7 +625,7 @@ static int pp_dpm_set_pp_table(void *handle,
>> const char *buf, size_t size)
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>    	int ret = -ENOMEM;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (!hwmgr->hardcode_pp_table) {
>>> @@ -655,7 +655,7 @@ static int pp_dpm_force_clock_level(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
>>> @@ -676,7 +676,7 @@ static int pp_dpm_print_clock_levels(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
>>> @@ -690,7 +690,7 @@ static int pp_dpm_get_sclk_od(void *handle)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
>>> @@ -704,7 +704,7 @@ static int pp_dpm_set_sclk_od(void *handle,
>> uint32_t value)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
>>> @@ -719,7 +719,7 @@ static int pp_dpm_get_mclk_od(void *handle)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
>>> @@ -733,7 +733,7 @@ static int pp_dpm_set_mclk_od(void *handle,
>> uint32_t value)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
>>> @@ -748,7 +748,7 @@ static int pp_dpm_read_sensor(void *handle, int
>> idx,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !value)
>>> +	if (!hwmgr || !value)
>>>    		return -EINVAL;
>>>
>>>    	switch (idx) {
>>> @@ -774,7 +774,7 @@ pp_dpm_get_vce_clock_state(void *handle,
>> unsigned idx)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return NULL;
>>>
>>>    	if (idx < hwmgr->num_vce_state_tables)
>>> @@ -786,7 +786,7 @@ static int pp_get_power_profile_mode(void
>> *handle, char *buf)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !hwmgr->hwmgr_func->get_power_profile_mode)
>>> +	if (!hwmgr || !hwmgr->hwmgr_func->get_power_profile_mode)
>>>    		return -EOPNOTSUPP;
>>>    	if (!buf)
>>>    		return -EINVAL;
>>> @@ -798,7 +798,7 @@ static int pp_set_power_profile_mode(void
>> *handle, long *input, uint32_t size)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !hwmgr->hwmgr_func->set_power_profile_mode)
>>> +	if (!hwmgr || !hwmgr->hwmgr_func->set_power_profile_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> @@ -813,7 +813,7 @@ static int pp_set_fine_grain_clk_vol(void *handle,
>> uint32_t type, long *input, u
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
>>> @@ -826,7 +826,7 @@ static int pp_odn_edit_dpm_table(void *handle,
>> uint32_t type, long *input, uint3
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
>>> @@ -860,7 +860,7 @@ static int pp_dpm_switch_power_profile(void
>> *handle,
>>>    	long workload;
>>>    	uint32_t index;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
>>> @@ -900,7 +900,7 @@ static int pp_set_power_limit(void *handle,
>> uint32_t limit)
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>    	uint32_t max_power_limit;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
>>> @@ -932,7 +932,7 @@ static int pp_get_power_limit(void *handle,
>> uint32_t *limit,
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!limit)
>>> +	if (!hwmgr || !limit)
>>>    		return -EINVAL;
>>>
>>>    	if (power_type != PP_PWR_TYPE_SUSTAINED)
>>> @@ -965,7 +965,7 @@ static int pp_display_configuration_change(void
>> *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	phm_store_dal_configuration_data(hwmgr, display_config);
>>> @@ -977,7 +977,7 @@ static int pp_get_display_power_level(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!output)
>>> +	if (!hwmgr || !output)
>>>    		return -EINVAL;
>>>
>>>    	return phm_get_dal_power_level(hwmgr, output);
>>> @@ -991,7 +991,7 @@ static int pp_get_current_clocks(void *handle,
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	phm_get_dal_power_level(hwmgr, &simple_clocks);
>>> @@ -1035,7 +1035,7 @@ static int pp_get_clock_by_type(void *handle,
>> enum amd_pp_clock_type type, struc
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (clocks == NULL)
>>> @@ -1050,7 +1050,7 @@ static int
>> pp_get_clock_by_type_with_latency(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!clocks)
>>> +	if (!hwmgr || !clocks)
>>>    		return -EINVAL;
>>>
>>>    	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
>>> @@ -1062,7 +1062,7 @@ static int
>> pp_get_clock_by_type_with_voltage(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!clocks)
>>> +	if (!hwmgr || !clocks)
>>>    		return -EINVAL;
>>>
>>>    	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
>>> @@ -1073,7 +1073,7 @@ static int
>> pp_set_watermarks_for_clocks_ranges(void *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !clock_ranges)
>>> +	if (!hwmgr || !clock_ranges)
>>>    		return -EINVAL;
>>>
>>>    	return phm_set_watermarks_for_clocks_ranges(hwmgr,
>>> @@ -1085,7 +1085,7 @@ static int pp_display_clock_voltage_request(void
>> *handle,
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!clock)
>>> +	if (!hwmgr || !clock)
>>>    		return -EINVAL;
>>>
>>>    	return phm_display_clock_voltage_request(hwmgr, clock);
>>> @@ -1097,7 +1097,7 @@ static int
>> pp_get_display_mode_validation_clocks(void *handle,
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled ||!clocks)
>>> +	if (!hwmgr || !clocks)
>>>    		return -EINVAL;
>>>
>>>    	clocks->level = PP_DAL_POWERLEVEL_7;
>>> @@ -1112,7 +1112,7 @@ static int pp_dpm_powergate_mmhub(void
>> *handle)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
>>> @@ -1127,7 +1127,7 @@ static int pp_dpm_powergate_gfx(void *handle,
>> bool gate)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return 0;
>>>
>>>    	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
>>> @@ -1142,7 +1142,7 @@ static void pp_dpm_powergate_acp(void *handle,
>> bool gate)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return;
>>>
>>>    	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
>>> @@ -1208,7 +1208,7 @@ static int pp_notify_smu_enable_pwe(void
>> *handle)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
>>> @@ -1228,8 +1228,7 @@ static int pp_enable_mgpu_fan_boost(void
>> *handle)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled ||
>>> -	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
>>> +	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
>>> @@ -1241,7 +1240,7 @@ static int pp_set_min_deep_sleep_dcefclk(void
>> *handle, uint32_t clock)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
>>> @@ -1258,7 +1257,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void
>> *handle, uint32_t clock)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL)
>> {
>>> @@ -1275,7 +1274,7 @@ static int pp_set_hard_min_fclk_by_freq(void
>> *handle, uint32_t clock)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
>>> @@ -1292,7 +1291,7 @@ static int pp_set_active_display_count(void
>> *handle, uint32_t count)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	return phm_set_active_display_count(hwmgr, count);
>>> @@ -1350,7 +1349,7 @@ static int pp_get_ppfeature_status(void *handle,
>> char *buf)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled || !buf)
>>> +	if (!hwmgr || !buf)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
>>> @@ -1365,7 +1364,7 @@ static int pp_set_ppfeature_status(void *handle,
>> uint64_t ppfeature_masks)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
>>> @@ -1395,7 +1394,7 @@ static int pp_smu_i2c_bus_access(void *handle,
>> bool acquire)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
>>> @@ -1413,7 +1412,7 @@ static int pp_set_df_cstate(void *handle, enum
>> pp_df_cstate state)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
>> || !hwmgr->hwmgr_func->set_df_cstate)
>>> +	if (!hwmgr->hwmgr_func->set_df_cstate)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
>>> @@ -1428,7 +1427,7 @@ static int pp_set_xgmi_pstate(void *handle,
>> uint32_t pstate)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
>> || !hwmgr->hwmgr_func->set_xgmi_pstate)
>>> +	if (!hwmgr->hwmgr_func->set_xgmi_pstate)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
>>> @@ -1443,7 +1442,7 @@ static ssize_t pp_get_gpu_metrics(void *handle,
>> void **table)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled
>> || !hwmgr->hwmgr_func->get_gpu_metrics)
>>> +	if (!hwmgr->hwmgr_func->get_gpu_metrics)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
>>> @@ -1453,7 +1452,7 @@ static int pp_gfx_state_change_set(void *handle,
>> uint32_t state)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !((struct amdgpu_device *)hwmgr->adev)-
>>> pm.dpm_enabled)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> index 96a3388c2cb7..97c57a6cf314 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> @@ -68,9 +68,6 @@ static int smu_sys_get_pp_feature_mask(void
>> *handle,
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	return smu_get_pp_feature_mask(smu, buf);
>>>    }
>>>
>>> @@ -79,9 +76,6 @@ static int smu_sys_set_pp_feature_mask(void
>> *handle,
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	return smu_set_pp_feature_mask(smu, new_mask);
>>>    }
>>>
>>> @@ -219,13 +213,6 @@ static int smu_dpm_set_power_gate(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled) {
>>> -		dev_WARN(smu->adev->dev,
>>> -			 "SMU uninitialized but power %s requested
>> for %u!\n",
>>> -			 gate ? "gate" : "ungate", block_type);
>>> -		return -EOPNOTSUPP;
>>> -	}
>>> -
>>>    	switch (block_type) {
>>>    	/*
>>>    	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
>>> @@ -315,9 +302,6 @@ static void smu_restore_dpm_user_profile(struct
>> smu_context *smu)
>>>    	if (!smu->adev->in_suspend)
>>>    		return;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return;
>>> -
>>>    	/* Enable restore flag */
>>>    	smu->user_dpm_profile.flags |=
>> SMU_DPM_USER_PROFILE_RESTORE;
>>>
>>> @@ -428,9 +412,6 @@ static int smu_sys_get_pp_table(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	struct smu_table_context *smu_table = &smu->smu_table;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu_table->power_play_table && !smu_table-
>>> hardcode_pptable)
>>>    		return -EINVAL;
>>>
>>> @@ -451,9 +432,6 @@ static int smu_sys_set_pp_table(void *handle,
>>>    	ATOM_COMMON_TABLE_HEADER *header =
>> (ATOM_COMMON_TABLE_HEADER *)buf;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (header->usStructureSize != size) {
>>>    		dev_err(smu->adev->dev, "pp table size not matched !\n");
>>>    		return -EIO;
>>> @@ -1564,9 +1542,6 @@ static int smu_display_configuration_change(void
>> *handle,
>>>    	int index = 0;
>>>    	int num_of_active_display = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!display_config)
>>>    		return -EINVAL;
>>>
>>> @@ -1704,9 +1679,6 @@ static int smu_handle_task(struct smu_context
>> *smu,
>>>    {
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	switch (task_id) {
>>>    	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
>>>    		ret = smu_pre_display_config_changed(smu);
>>> @@ -1745,9 +1717,6 @@ static int smu_switch_power_profile(void
>> *handle,
>>>    	long workload;
>>>    	uint32_t index;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
>>>    		return -EINVAL;
>>>
>>> @@ -1775,9 +1744,6 @@ static enum amd_dpm_forced_level
>> smu_get_performance_level(void *handle)
>>>    	struct smu_context *smu = handle;
>>>    	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>>>    		return -EINVAL;
>>>
>>> @@ -1791,9 +1757,6 @@ static int smu_force_performance_level(void
>> *handle,
>>>    	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
>>>    		return -EINVAL;
>>>
>>> @@ -1817,9 +1780,6 @@ static int smu_set_display_count(void *handle,
>> uint32_t count)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	return smu_init_display_count(smu, count);
>>>    }
>>>
>>> @@ -1830,9 +1790,6 @@ static int smu_force_smuclk_levels(struct
>> smu_context *smu,
>>>    	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu_dpm_ctx->dpm_level !=
>> AMD_DPM_FORCED_LEVEL_MANUAL) {
>>>    		dev_dbg(smu->adev->dev, "force clock level is for dpm
>> manual mode only.\n");
>>>    		return -EINVAL;
>>> @@ -1917,9 +1874,6 @@ static int smu_set_df_cstate(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
>>>    		return 0;
>>>
>>> @@ -1934,9 +1888,6 @@ int smu_allow_xgmi_power_down(struct
>> smu_context *smu, bool en)
>>>    {
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
>>>    		return 0;
>>>
>>> @@ -1947,22 +1898,11 @@ int smu_allow_xgmi_power_down(struct
>> smu_context *smu, bool en)
>>>    	return ret;
>>>    }
>>>
>>> -int smu_write_watermarks_table(struct smu_context *smu)
>>> -{
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>> -	return smu_set_watermarks_table(smu, NULL);
>>> -}
>>> -
>>>    static int smu_set_watermarks_for_clock_ranges(void *handle,
>>>    					       struct pp_smu_wm_range_sets
>> *clock_ranges)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->disable_watermark)
>>>    		return 0;
>>>
>>> @@ -1973,9 +1913,6 @@ int smu_set_ac_dc(struct smu_context *smu)
>>>    {
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	/* controlled by firmware */
>>>    	if (smu->dc_controlled_by_gpio)
>>>    		return 0;
>>> @@ -2083,9 +2020,6 @@ static int smu_set_fan_speed_rpm(void *handle,
>> uint32_t speed)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->set_fan_speed_rpm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2126,9 +2060,6 @@ int smu_get_power_limit(void *handle,
>>>    	uint32_t limit_type;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	switch(pp_power_type) {
>>>    	case PP_PWR_TYPE_SUSTAINED:
>>>    		limit_type = SMU_DEFAULT_PPT_LIMIT;
>>> @@ -2199,9 +2130,6 @@ static int smu_set_power_limit(void *handle,
>> uint32_t limit)
>>>    	uint32_t limit_type = limit >> 24;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	limit &= (1<<24)-1;
>>>    	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
>>>    		if (smu->ppt_funcs->set_power_limit)
>>> @@ -2230,9 +2158,6 @@ static int smu_print_smuclk_levels(struct
>> smu_context *smu, enum smu_clk_type cl
>>>    {
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->print_clk_levels)
>>>    		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
>>>
>>> @@ -2319,9 +2244,6 @@ static int smu_od_edit_dpm_table(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->od_edit_dpm_table) {
>>>    		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input,
>> size);
>>>    	}
>>> @@ -2340,9 +2262,6 @@ static int smu_read_sensor(void *handle,
>>>    	int ret = 0;
>>>    	uint32_t *size, size_val;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!data || !size_arg)
>>>    		return -EINVAL;
>>>
>>> @@ -2399,8 +2318,7 @@ static int smu_get_power_profile_mode(void
>> *handle, char *buf)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled ||
>>> -	    !smu->ppt_funcs->get_power_profile_mode)
>>> +	if (!smu->ppt_funcs->get_power_profile_mode)
>>>    		return -EOPNOTSUPP;
>>>    	if (!buf)
>>>    		return -EINVAL;
>>> @@ -2414,8 +2332,7 @@ static int smu_set_power_profile_mode(void
>> *handle,
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled ||
>>> -	    !smu->ppt_funcs->set_power_profile_mode)
>>> +	if (!smu->ppt_funcs->set_power_profile_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>>    	return smu_bump_power_profile_mode(smu, param, param_size);
>>> @@ -2426,9 +2343,6 @@ static int smu_get_fan_control_mode(void
>> *handle, u32 *fan_mode)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->get_fan_control_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2445,9 +2359,6 @@ static int smu_set_fan_control_mode(void
>> *handle, u32 value)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->set_fan_control_mode)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2478,9 +2389,6 @@ static int smu_get_fan_speed_pwm(void
>> *handle, u32 *speed)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->get_fan_speed_pwm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2497,9 +2405,6 @@ static int smu_set_fan_speed_pwm(void *handle,
>> u32 speed)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->set_fan_speed_pwm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2524,9 +2429,6 @@ static int smu_get_fan_speed_rpm(void *handle,
>> uint32_t *speed)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->get_fan_speed_rpm)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2542,9 +2444,6 @@ static int smu_set_deep_sleep_dcefclk(void
>> *handle, uint32_t clk)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	return smu_set_min_dcef_deep_sleep(smu, clk);
>>>    }
>>>
>>> @@ -2556,9 +2455,6 @@ static int
>> smu_get_clock_by_type_with_latency(void *handle,
>>>    	enum smu_clk_type clk_type;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
>>>    		switch (type) {
>>>    		case amd_pp_sys_clock:
>>> @@ -2590,9 +2486,6 @@ static int
>> smu_display_clock_voltage_request(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->display_clock_voltage_request)
>>>    		ret = smu->ppt_funcs->display_clock_voltage_request(smu,
>> clock_req);
>>>
>>> @@ -2606,9 +2499,6 @@ static int
>> smu_display_disable_memory_clock_switch(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = -EINVAL;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->display_disable_memory_clock_switch)
>>>    		ret = smu->ppt_funcs-
>>> display_disable_memory_clock_switch(smu,
>> disable_memory_clock_switch);
>>>
>>> @@ -2621,9 +2511,6 @@ static int smu_set_xgmi_pstate(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->set_xgmi_pstate)
>>>    		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
>>>
>>> @@ -2722,9 +2609,6 @@ static int
>> smu_get_max_sustainable_clocks_by_dc(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
>>>    		ret = smu->ppt_funcs-
>>> get_max_sustainable_clocks_by_dc(smu, max_clocks);
>>>
>>> @@ -2738,9 +2622,6 @@ static int smu_get_uclk_dpm_states(void
>> *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->get_uclk_dpm_states)
>>>    		ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
>> clock_values_in_khz, num_states);
>>>
>>> @@ -2752,9 +2633,6 @@ static enum amd_pm_state_type
>> smu_get_current_power_state(void *handle)
>>>    	struct smu_context *smu = handle;
>>>    	enum amd_pm_state_type pm_state =
>> POWER_STATE_TYPE_DEFAULT;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->get_current_power_state)
>>>    		pm_state = smu->ppt_funcs-
>>> get_current_power_state(smu);
>>>
>>> @@ -2767,9 +2645,6 @@ static int smu_get_dpm_clock_table(void
>> *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->get_dpm_clock_table)
>>>    		ret = smu->ppt_funcs->get_dpm_clock_table(smu,
>> clock_table);
>>>
>>> @@ -2780,9 +2655,6 @@ static ssize_t smu_sys_get_gpu_metrics(void
>> *handle, void **table)
>>>    {
>>>    	struct smu_context *smu = handle;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (!smu->ppt_funcs->get_gpu_metrics)
>>>    		return -EOPNOTSUPP;
>>>
>>> @@ -2794,9 +2666,6 @@ static int smu_enable_mgpu_fan_boost(void
>> *handle)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->adev->pm.dpm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->enable_mgpu_fan_boost)
>>>    		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
>>>
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
>> b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
>>> index 39d169440d15..bced761f3f96 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
>>> @@ -1399,7 +1399,6 @@ extern const struct amd_ip_funcs smu_ip_funcs;
>>>
>>>    bool is_support_sw_smu(struct amdgpu_device *adev);
>>>    bool is_support_cclk_dpm(struct amdgpu_device *adev);
>>> -int smu_write_watermarks_table(struct smu_context *smu);
>>>
>>>    int smu_get_dpm_freq_range(struct smu_context *smu, enum
>> smu_clk_type clk_type,
>>>    			   uint32_t *min, uint32_t *max);
>>>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs
  2022-02-17  2:48     ` Quan, Evan
@ 2022-02-17  5:00       ` Lazar, Lijo
  0 siblings, 0 replies; 23+ messages in thread
From: Lazar, Lijo @ 2022-02-17  5:00 UTC (permalink / raw)
  To: Quan, Evan, amd-gfx; +Cc: Deucher, Alexander, rui.huang



On 2/17/2022 8:18 AM, Quan, Evan wrote:
> [AMD Official Use Only]
> 
> 
> 
>> -----Original Message-----
>> From: Lazar, Lijo <Lijo.Lazar@amd.com>
>> Sent: Monday, February 14, 2022 12:04 PM
>> To: Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
>> Cc: Deucher, Alexander <Alexander.Deucher@amd.com>;
>> rui.huang@amd.com
>> Subject: Re: [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu
>> reset APIs
>>
>>
>>
>> On 2/11/2022 1:22 PM, Evan Quan wrote:
>>> Those gpu reset APIs can be granted when:
>>>     - System is up and dpm features are enabled.
>>>     - System is resuming and dpm features are not yet enabled.
>>>       Under such a scenario, the PMFW is already alive and can support
>>>       those gpu reset functionalities.
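A minimal sketch of the gate described above, for illustration only: the wrapper name is invented here, and using adev->in_suspend as the resume indicator is an assumption rather than something this patch spells out.

	/* illustrative only, not part of the patch below */
	static bool gpu_reset_request_allowed(struct amdgpu_device *adev)
	{
		/* normal case: dpm features are fully enabled */
		if (adev->pm.dpm_enabled)
			return true;

		/*
		 * resume case: dpm is not yet re-enabled, but the PMFW is
		 * already alive and can service gpu reset requests
		 */
		if (adev->in_suspend && amdgpu_dpm_is_smc_alive(adev))
			return true;

		return false;
	}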
>>>
>>> Signed-off-by: Evan Quan <evan.quan@amd.com>
>>> Change-Id: I8c2f07138921eb53a2bd7fb94f9b3622af0eacf8
>>> ---
>>>    .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
>>>    drivers/gpu/drm/amd/pm/amdgpu_dpm.c           | 34 +++++++++++++++
>>>    .../gpu/drm/amd/pm/powerplay/amd_powerplay.c  | 42
>> +++++++++++++++----
>>>    .../drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c   |  1 +
>>>    .../drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c   | 17 ++++++++
>>>    drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h  |  1 +
>>>    drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 32 +++++++-------
>>>    7 files changed, 101 insertions(+), 27 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> index a4c267f15959..892648a4a353 100644
>>> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> @@ -409,6 +409,7 @@ struct amd_pm_funcs {
>>>    				   struct dpm_clocks *clock_table);
>>>    	int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t
>> *size);
>>>    	void (*pm_compute_clocks)(void *handle);
>>> +	bool (*is_smc_alive)(void *handle);
>>>    };
>>>
>>>    struct metrics_table_header {
>>> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> index b46ae0063047..5f1d3342f87b 100644
>>> --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
>>> @@ -120,12 +120,25 @@ int
>> amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
>> uint32_t block
>>>    	return ret;
>>>    }
>>>
>>> +static bool amdgpu_dpm_is_smc_alive(struct amdgpu_device *adev)
>>> +{
>>> +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>> +
>>> +	if (!pp_funcs || !pp_funcs->is_smc_alive)
>>> +		return false;
>>> +
>>> +	return pp_funcs->is_smc_alive;
>>> +}
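As quoted, the helper returns the callback pointer itself rather than invoking it. Given that the is_smc_alive hook added to amd_pm_funcs above takes the pp handle, the intended tail of the function is presumably along these lines (a sketch, not the author's confirmed fix):

	if (!pp_funcs || !pp_funcs->is_smc_alive)
		return false;

	/* call the backend hook instead of returning the function pointer */
	return pp_funcs->is_smc_alive(adev->powerplay.pp_handle);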
>>> +
>>>    int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
>>>    {
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>    	void *pp_handle = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>>>    		return -ENOENT;
>>>
>>> @@ -145,6 +158,9 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device
>> *adev)
>>>    	void *pp_handle = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>>>    		return -ENOENT;
>>>
>>> @@ -164,6 +180,9 @@ int amdgpu_dpm_set_mp1_state(struct
>> amdgpu_device *adev,
>>>    	int ret = 0;
>>>    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (pp_funcs && pp_funcs->set_mp1_state) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>
>>> @@ -184,6 +203,9 @@ bool amdgpu_dpm_is_baco_supported(struct
>> amdgpu_device *adev)
>>>    	bool baco_cap;
>>>    	int ret = 0;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return false;
>>> +
>>>    	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
>>>    		return false;
>>>
>>> @@ -203,6 +225,9 @@ int amdgpu_dpm_mode2_reset(struct
>> amdgpu_device *adev)
>>>    	void *pp_handle = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
>>>    		return -ENOENT;
>>>
>>> @@ -221,6 +246,9 @@ int amdgpu_dpm_baco_reset(struct
>> amdgpu_device *adev)
>>>    	void *pp_handle = adev->powerplay.pp_handle;
>>>    	int ret = 0;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
>>>    		return -ENOENT;
>>>
>>> @@ -244,6 +272,9 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	bool support_mode1_reset = false;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return false;
>>> +
>>>    	if (is_support_sw_smu(adev)) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		support_mode1_reset = smu_mode1_reset_is_support(smu);
>>> @@ -258,6 +289,9 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
>>>    	struct smu_context *smu = adev->powerplay.pp_handle;
>>>    	int ret = -EOPNOTSUPP;
>>>
>>> +	if (!amdgpu_dpm_is_smc_alive(adev))
>>> +		return -EOPNOTSUPP;
>>> +
>>>    	if (is_support_sw_smu(adev)) {
>>>    		mutex_lock(&adev->pm.mutex);
>>>    		ret = smu_mode1_reset(smu);
>>> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> index bba923cfe08c..4c709f7bcd51 100644
>>> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
>>> @@ -844,9 +844,6 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!hwmgr->pm_en)
>>> -		return 0;
>>> -
>>>    	if (hwmgr->hwmgr_func->set_mp1_state)
>>>    		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
>>>
>>> @@ -1305,8 +1302,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
>>> -		!hwmgr->hwmgr_func->get_asic_baco_capability)
>>> +	if (!hwmgr->hwmgr_func->get_asic_baco_capability)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
>>> @@ -1321,7 +1317,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
>>> +	if (!hwmgr->hwmgr_func->get_asic_baco_state)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
>>> @@ -1336,8 +1332,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
>>>    	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>> -	if (!(hwmgr->not_vf && amdgpu_dpm) ||
>>> -		!hwmgr->hwmgr_func->set_asic_baco_state)
>>> +	if (!hwmgr->hwmgr_func->set_asic_baco_state)
>>>    		return 0;
>>>
>>>    	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
>>> @@ -1379,7 +1374,7 @@ static int pp_asic_reset_mode_2(void *handle)
>>>    {
>>>    	struct pp_hwmgr *hwmgr = handle;
>>>
>>> -	if (!hwmgr || !hwmgr->pm_en)
>>> +	if (!hwmgr)
>>>    		return -EINVAL;
>>>
>>>    	if (hwmgr->hwmgr_func->asic_reset == NULL) {
>>> @@ -1517,6 +1512,34 @@ static void pp_pm_compute_clocks(void *handle)
>>>    			      NULL);
>>>    }
>>>
>>> +/* MP Apertures */
>>> +#define MP1_Public					0x03b00000
>>> +#define smnMP1_FIRMWARE_FLAGS				0x3010028
>>> +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L
>>> +
>>> +static bool pp_is_smc_alive(void *handle)
>>> +{
>>> +	struct pp_hwmgr *hwmgr = handle;
>>> +	struct amdgpu_device *adev = hwmgr->adev;
>>> +	uint32_t mp1_fw_flags;
>>> +
>>> +	/*
>>> +	 * If some ASIC (e.g. smu7/smu8) needs special handling for
>>> +	 * checking smc alive, it should have its own implementation
>>> +	 * for ->is_smc_alive.
>>> +	 */
>>> +	if (hwmgr->hwmgr_func->is_smc_alive)
>>> +		return hwmgr->hwmgr_func->is_smc_alive(hwmgr);
>>> +
>>> +	mp1_fw_flags = RREG32_PCIE(MP1_Public |
>>> +				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
>>> +
>>
>> The flags check doesn't tell whether the PMFW is hung or not. It is a
>> minimal flag that gets set after PMFW boot. Calling the API at all
>> already implies this condition: the driver always checks it on boot and
>> aborts SMU init if it is not set.
>>
>> So the better approach is to go ahead and send the message without any
>> pre-check; the result itself will tell whether the PMFW is really
>> working or not.
>>
>> In short, this API is not needed.
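
A minimal sketch of the "no pre-check" idea above (illustration only; the
helper name is hypothetical and the flow is simplified, reusing only names
visible in the quoted diff):

	/* Rely on the message result instead of a liveness pre-check. */
	static int smu_set_mp1_state_no_precheck(struct smu_context *smu,
						 enum pp_mp1_state mp1_state)
	{
		int ret = -EOPNOTSUPP;

		if (smu->ppt_funcs && smu->ppt_funcs->set_mp1_state)
			ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

		/*
		 * If the PMFW never booted or is hung, the underlying message
		 * times out and ret carries the failure; no separate
		 * is_smc_alive() check is required.
		 */
		return ret;
	}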
> [Quan, Evan] It was not designed to cover "PMFW hung". Instead, it was designed to support the early phase of post-silicon bringup.
> At that time, the SMU may not be enabled/up yet. We need to prevent these APIs from being wrongly called then.
> 

One of the first things done, at least in swsmu, is hw_init/resume ->
smu_start_smc_engine -> check_fw_status.

If the SMU is not up/enabled, this call shouldn't even happen, as init
itself will fail.
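
A rough sketch of that ordering, assuming a simplified wrapper around the
names used in this thread (not the in-tree code):

	/*
	 * smu_hw_init() aborts when check_fw_status() fails, so the pm
	 * entry points are only reachable once the PMFW has booted.
	 */
	static int smu_hw_init_sketch(struct smu_context *smu)
	{
		int ret = 0;

		if (smu->ppt_funcs->check_fw_status)
			ret = smu->ppt_funcs->check_fw_status(smu); /* 0 == PMFW up */

		if (ret)
			return ret; /* init fails; amd_pm_funcs never get exercised */

		/* ... remaining SMU setup ... */
		return 0;
	}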

Thanks,
Lijo

> BR
> Evan
>>
>> Thanks,
>> Lijo
>>
>>> +	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
>>> +		return true;
>>> +
>>> +	return false;
>>> +}
>>> +
>>>    static const struct amd_pm_funcs pp_dpm_funcs = {
>>>    	.load_firmware = pp_dpm_load_fw,
>>>    	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
>>> @@ -1582,4 +1605,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
>>>    	.gfx_state_change_set = pp_gfx_state_change_set,
>>>    	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
>>>    	.pm_compute_clocks = pp_pm_compute_clocks,
>>> +	.is_smc_alive = pp_is_smc_alive,
>>>    };
>>> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
>>> b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
>>> index a1e11037831a..118039b96524 100644
>>> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
>>> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
>>> @@ -5735,6 +5735,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
>>>    	.get_asic_baco_state = smu7_baco_get_state,
>>>    	.set_asic_baco_state = smu7_baco_set_state,
>>>    	.power_off_asic = smu7_power_off_asic,
>>> +	.is_smc_alive = smu7_is_smc_ram_running,
>>>    };
>>>
>>>    uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
>>> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
>>> b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
>>> index b50fd4a4a3d1..fc4d58329f6d 100644
>>> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
>>> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
>>> @@ -2015,6 +2015,22 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
>>>    	}
>>>    }
>>>
>>> +#define ixMP1_FIRMWARE_FLAGS				0x3008210
>>> +#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK	0x00000001L
>>> +
>>> +static bool smu8_is_smc_running(struct pp_hwmgr *hwmgr)
>>> +{
>>> +	struct amdgpu_device *adev = hwmgr->adev;
>>> +	uint32_t mp1_fw_flags;
>>> +
>>> +	mp1_fw_flags = RREG32_SMC(ixMP1_FIRMWARE_FLAGS);
>>> +
>>> +	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
>>> +		return true;
>>> +
>>> +	return false;
>>> +}
>>> +
>>>    static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
>>>    	.backend_init = smu8_hwmgr_backend_init,
>>>    	.backend_fini = smu8_hwmgr_backend_fini,
>>> @@ -2047,6 +2063,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
>>>    	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
>>>    	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
>>>    	.get_thermal_temperature_range =
>>> smu8_get_thermal_temperature_range,
>>> +	.is_smc_alive = smu8_is_smc_running,
>>>    };
>>>
>>>    int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
>>> diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
>>> b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
>>> index 4f7f2f455301..790fc387752c 100644
>>> --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
>>> +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
>>> @@ -364,6 +364,7 @@ struct pp_hwmgr_func {
>>>    					bool disable);
>>>    	ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
>>>    	int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state);
>>> +	bool (*is_smc_alive)(struct pp_hwmgr *hwmgr);
>>>    };
>>>
>>>    struct pp_table_func {
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> index 8b8feaf7aa0e..27a453fb4db7 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
>>> @@ -1845,9 +1845,6 @@ static int smu_set_mp1_state(void *handle,
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs &&
>>>    	    smu->ppt_funcs->set_mp1_state)
>>>    		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
>>> @@ -2513,9 +2510,6 @@ static int smu_get_baco_capability(void *handle, bool *cap)
>>>
>>>    	*cap = false;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return 0;
>>> -
>>>    	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
>>>    		*cap = smu->ppt_funcs->baco_is_support(smu);
>>>
>>> @@ -2527,9 +2521,6 @@ static int smu_baco_set_state(void *handle, int state)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (state == 0) {
>>>    		if (smu->ppt_funcs->baco_exit)
>>>    			ret = smu->ppt_funcs->baco_exit(smu);
>>> @@ -2551,9 +2542,6 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
>>>    {
>>>    	bool ret = false;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return false;
>>> -
>>>    	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
>>>    		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
>>>
>>> @@ -2564,9 +2552,6 @@ int smu_mode1_reset(struct smu_context *smu)
>>>    {
>>>    	int ret = 0;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->mode1_reset)
>>>    		ret = smu->ppt_funcs->mode1_reset(smu);
>>>
>>> @@ -2578,9 +2563,6 @@ static int smu_mode2_reset(void *handle)
>>>    	struct smu_context *smu = handle;
>>>    	int ret = 0;
>>>
>>> -	if (!smu->pm_enabled)
>>> -		return -EOPNOTSUPP;
>>> -
>>>    	if (smu->ppt_funcs->mode2_reset)
>>>    		ret = smu->ppt_funcs->mode2_reset(smu);
>>>
>>> @@ -2712,6 +2694,19 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
>>>    	return 0;
>>>    }
>>>
>>> +static bool smu_is_smc_alive(void *handle)
>>> +{
>>> +	struct smu_context *smu = handle;
>>> +
>>> +	if (!smu->ppt_funcs->check_fw_status)
>>> +		return false;
>>> +
>>> +	if (!smu->ppt_funcs->check_fw_status(smu))
>>> +		return true;
>>> +
>>> +	return false;
>>> +}
>>> +
>>>    static const struct amd_pm_funcs swsmu_pm_funcs = {
>>>    	/* export for sysfs */
>>>    	.set_fan_control_mode    = smu_set_fan_control_mode,
>>> @@ -2765,6 +2760,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
>>>    	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
>>>    	.get_dpm_clock_table              = smu_get_dpm_clock_table,
>>>    	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
>>> +	.is_smc_alive = smu_is_smc_alive,
>>>    };
>>>
>>>    int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
>>>

^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2022-02-17  5:01 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-02-11  7:51 [PATCH 01/12] drm/amd/pm: drop unused structure members Evan Quan
2022-02-11  7:51 ` [PATCH 02/12] drm/amd/pm: drop unused interfaces Evan Quan
2022-02-11  7:52 ` [PATCH 03/12] drm/amd/pm: drop unneeded !smu->pm_enabled check Evan Quan
2022-02-11  7:52 ` [PATCH 04/12] drm/amd/pm: use adev->pm.dpm_enabled for dpm enablement check Evan Quan
2022-02-11  7:52 ` [PATCH 05/12] drm/amd/pm: move the check for dpm enablement to amdgpu_dpm.c Evan Quan
2022-02-11  8:06   ` Chen, Guchun
2022-02-17  1:53     ` Quan, Evan
2022-02-11 13:39   ` Lazar, Lijo
2022-02-17  2:35     ` Quan, Evan
2022-02-17  4:55       ` Lazar, Lijo
2022-02-11  7:52 ` [PATCH 06/12] drm/amd/pm: correct the checks for sriov(pp_one_vf) Evan Quan
2022-02-11  7:52 ` [PATCH 07/12] drm/amd/pm: correct the checks for granting gpu reset APIs Evan Quan
2022-02-14  4:04   ` Lazar, Lijo
2022-02-17  2:48     ` Quan, Evan
2022-02-17  5:00       ` Lazar, Lijo
2022-02-11  7:52 ` [PATCH 08/12] drm/amd/pm: add proper check for amdgpu_dpm before granting pp_dpm_load_fw Evan Quan
2022-02-11  7:52 ` [PATCH 09/12] drm/amd/pm: drop redundant !pp_funcs check Evan Quan
2022-02-11  7:52 ` [PATCH 10/12] drm/amd/pm: drop nonsense !smu->ppt_funcs check Evan Quan
2022-02-11  7:52 ` [PATCH 11/12] drm/amd/pm: drop extra non-necessary null pointers checks Evan Quan
2022-02-11  7:52 ` [PATCH 12/12] drm/amd/pm: revise the implementations for asic reset Evan Quan
2022-02-11 13:21   ` Lazar, Lijo
2022-02-17  2:53     ` Quan, Evan
2022-02-11  7:55 ` [PATCH 01/12] drm/amd/pm: drop unused structure members Christian König
