All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_*
@ 2018-03-06 10:10 Rex Zhu
       [not found] ` <1520331047-10759-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 4+ messages in thread
From: Rex Zhu @ 2018-03-06 10:10 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Rex Zhu

Powerplay is named for the SMU hardware IP; on Raven (RV), smu10 is the
SMU version used, so use smu10 as the prefix of the file names.

Change-Id: Icf9c8a9d4b5deccd4fbfb9ecfab443db79934c77
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/Makefile       |    2 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     | 1075 -------------------
 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h     |  322 ------
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 1076 ++++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h  |  322 ++++++
 drivers/gpu/drm/amd/powerplay/smumgr/Makefile      |    2 +-
 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c   |  399 --------
 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h   |   61 --
 .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c    |  399 ++++++++
 .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h    |   61 ++
 10 files changed, 1860 insertions(+), 1859 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
 delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
 delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index e8c5a4f..1fa9a97 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -30,7 +30,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
 		smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
 		smu7_clockpowergating.o \
 		vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
-		vega10_thermal.o rv_hwmgr.o pp_psm.o\
+		vega10_thermal.o smu10_hwmgr.o pp_psm.o\
 		pp_overdriver.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
deleted file mode 100644
index 8ddfb78..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "pp_debug.h"
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "atom-types.h"
-#include "atombios.h"
-#include "processpptables.h"
-#include "cgs_common.h"
-#include "smumgr.h"
-#include "hwmgr.h"
-#include "hardwaremanager.h"
-#include "rv_ppsmc.h"
-#include "rv_hwmgr.h"
-#include "power_state.h"
-#include "rv_smumgr.h"
-#include "pp_soc15.h"
-
-#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID     5
-#define RAVEN_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
-#define SCLK_MIN_DIV_INTV_SHIFT         12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
-#define SMC_RAM_END                     0x40000
-
-static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
-
-
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
-		struct pp_display_clock_request *clock_req);
-
-
-static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
-{
-	if (PhwRaven_Magic != hw_ps->magic)
-		return NULL;
-
-	return (struct rv_power_state *)hw_ps;
-}
-
-static const struct rv_power_state *cast_const_rv_ps(
-				const struct pp_hw_power_state *hw_ps)
-{
-	if (PhwRaven_Magic != hw_ps->magic)
-		return NULL;
-
-	return (struct rv_power_state *)hw_ps;
-}
-
-static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
-
-	rv_hwmgr->dce_slow_sclk_threshold = 30000;
-	rv_hwmgr->thermal_auto_throttling_treshold = 0;
-	rv_hwmgr->is_nb_dpm_enabled = 1;
-	rv_hwmgr->dpm_flags = 1;
-	rv_hwmgr->gfx_off_controled_by_driver = false;
-	rv_hwmgr->need_min_deep_sleep_dcefclk = true;
-	rv_hwmgr->num_active_display = 0;
-	rv_hwmgr->deep_sleep_dcefclk = 0;
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_SclkDeepSleep);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_SclkThrottleLowNotification);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_PowerPlaySupport);
-	return 0;
-}
-
-static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
-			struct phm_clock_and_voltage_limits *table)
-{
-	return 0;
-}
-
-static int rv_init_dynamic_state_adjustment_rule_settings(
-							struct pp_hwmgr *hwmgr)
-{
-	uint32_t table_size =
-		sizeof(struct phm_clock_voltage_dependency_table) +
-		(7 * sizeof(struct phm_clock_voltage_dependency_record));
-
-	struct phm_clock_voltage_dependency_table *table_clk_vlt =
-					kzalloc(table_size, GFP_KERNEL);
-
-	if (NULL == table_clk_vlt) {
-		pr_err("Can not allocate memory!\n");
-		return -ENOMEM;
-	}
-
-	table_clk_vlt->count = 8;
-	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
-	table_clk_vlt->entries[0].v = 0;
-	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
-	table_clk_vlt->entries[1].v = 1;
-	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
-	table_clk_vlt->entries[2].v = 2;
-	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
-	table_clk_vlt->entries[3].v = 3;
-	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
-	table_clk_vlt->entries[4].v = 4;
-	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
-	table_clk_vlt->entries[5].v = 5;
-	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
-	table_clk_vlt->entries[6].v = 6;
-	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
-	table_clk_vlt->entries[7].v = 7;
-	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
-
-	return 0;
-}
-
-static int rv_get_system_info_data(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend;
-
-	rv_data->sys_info.htc_hyst_lmt = 5;
-	rv_data->sys_info.htc_tmp_lmt = 203;
-
-	if (rv_data->thermal_auto_throttling_treshold == 0)
-		 rv_data->thermal_auto_throttling_treshold = 203;
-
-	rv_construct_max_power_limits_table (hwmgr,
-				    &hwmgr->dyn_state.max_clock_voltage_on_ac);
-
-	rv_init_dynamic_state_adjustment_rule_settings(hwmgr);
-
-	return 0;
-}
-
-static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
-{
-	return 0;
-}
-
-static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
-{
-	struct PP_Clocks clocks = {0};
-	struct pp_display_clock_request clock_req;
-
-	clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
-	clock_req.clock_type = amd_pp_dcf_clock;
-	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
-
-	PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
-				"Attempt to set DCF Clock Failed!", return -EINVAL);
-
-	return 0;
-}
-
-static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
-		rv_data->deep_sleep_dcefclk = clock/100;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetMinDeepSleepDcefclk,
-					rv_data->deep_sleep_dcefclk);
-	}
-	return 0;
-}
-
-static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (rv_data->num_active_display != count) {
-		rv_data->num_active_display = count;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetDisplayCount,
-				rv_data->num_active_display);
-	}
-
-	return 0;
-}
-
-static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-	return rv_set_clock_limit(hwmgr, input);
-}
-
-static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	rv_data->vcn_power_gated = true;
-	rv_data->isp_tileA_power_gated = true;
-	rv_data->isp_tileB_power_gated = true;
-
-	return 0;
-}
-
-
-static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-	return rv_init_power_gate_state(hwmgr);
-}
-
-static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	rv_data->separation_time = 0;
-	rv_data->cc6_disable = false;
-	rv_data->pstate_disable = false;
-	rv_data->cc6_setting_changed = false;
-
-	return 0;
-}
-
-static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
-{
-	return rv_reset_cc6_data(hwmgr);
-}
-
-static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (rv_data->gfx_off_controled_by_driver)
-		smum_send_msg_to_smc(hwmgr,
-						PPSMC_MSG_DisableGfxOff);
-
-	return 0;
-}
-
-static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	return rv_disable_gfx_off(hwmgr);
-}
-
-static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (rv_data->gfx_off_controled_by_driver)
-		smum_send_msg_to_smc(hwmgr,
-						PPSMC_MSG_EnableGfxOff);
-
-	return 0;
-}
-
-static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	return rv_enable_gfx_off(hwmgr);
-}
-
-static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-				struct pp_power_state  *prequest_ps,
-			const struct pp_power_state *pcurrent_ps)
-{
-	return 0;
-}
-
-/* temporary hardcoded clock voltage breakdown tables */
-static const DpmClock_t VddDcfClk[]= {
-	{ 300, 2600},
-	{ 600, 3200},
-	{ 600, 3600},
-};
-
-static const DpmClock_t VddSocClk[]= {
-	{ 478, 2600},
-	{ 722, 3200},
-	{ 722, 3600},
-};
-
-static const DpmClock_t VddFClk[]= {
-	{ 400, 2600},
-	{1200, 3200},
-	{1200, 3600},
-};
-
-static const DpmClock_t VddDispClk[]= {
-	{ 435, 2600},
-	{ 661, 3200},
-	{1086, 3600},
-};
-
-static const DpmClock_t VddDppClk[]= {
-	{ 435, 2600},
-	{ 661, 3200},
-	{ 661, 3600},
-};
-
-static const DpmClock_t VddPhyClk[]= {
-	{ 540, 2600},
-	{ 810, 3200},
-	{ 810, 3600},
-};
-
-static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
-			struct rv_voltage_dependency_table **pptable,
-			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
-{
-	uint32_t table_size, i;
-	struct rv_voltage_dependency_table *ptable;
-
-	table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry;
-	ptable = kzalloc(table_size, GFP_KERNEL);
-
-	if (NULL == ptable)
-		return -ENOMEM;
-
-	ptable->count = num_entry;
-
-	for (i = 0; i < ptable->count; i++) {
-		ptable->entries[i].clk         = pclk_dependency_table->Freq * 100;
-		ptable->entries[i].vol         = pclk_dependency_table->Vol;
-		pclk_dependency_table++;
-	}
-
-	*pptable = ptable;
-
-	return 0;
-}
-
-
-static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-	DpmClocks_t  *table = &(rv_data->clock_table);
-	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-
-	result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
-
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Attempt to copy clock table from smc failed",
-			return result);
-
-	if (0 == result && table->DcefClocks[0].Freq != 0) {
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
-						NUM_DCEFCLK_DPM_LEVELS,
-						&rv_data->clock_table.DcefClocks[0]);
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
-						NUM_SOCCLK_DPM_LEVELS,
-						&rv_data->clock_table.SocClocks[0]);
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
-						NUM_FCLK_DPM_LEVELS,
-						&rv_data->clock_table.FClocks[0]);
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
-						NUM_MEMCLK_DPM_LEVELS,
-						&rv_data->clock_table.MemClocks[0]);
-	} else {
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
-						ARRAY_SIZE(VddDcfClk),
-						&VddDcfClk[0]);
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
-						ARRAY_SIZE(VddSocClk),
-						&VddSocClk[0]);
-		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
-						ARRAY_SIZE(VddFClk),
-						&VddFClk[0]);
-	}
-	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
-					ARRAY_SIZE(VddDispClk),
-					&VddDispClk[0]);
-	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
-					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
-	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
-					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
-
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_GetMinGfxclkFrequency),
-			"Attempt to get min GFXCLK Failed!",
-			return -1);
-	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
-			&result),
-			"Attempt to get min GFXCLK Failed!",
-			return -1);
-	rv_data->gfx_min_freq_limit = result * 100;
-
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_GetMaxGfxclkFrequency),
-			"Attempt to get max GFXCLK Failed!",
-			return -1);
-	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
-			&result),
-			"Attempt to get max GFXCLK Failed!",
-			return -1);
-	rv_data->gfx_max_freq_limit = result * 100;
-
-	return 0;
-}
-
-static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	struct rv_hwmgr *data;
-
-	data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
-
-	result = rv_initialize_dpm_defaults(hwmgr);
-	if (result != 0) {
-		pr_err("rv_initialize_dpm_defaults failed\n");
-		return result;
-	}
-
-	rv_populate_clock_table(hwmgr);
-
-	result = rv_get_system_info_data(hwmgr);
-	if (result != 0) {
-		pr_err("rv_get_system_info_data failed\n");
-		return result;
-	}
-
-	rv_construct_boot_state(hwmgr);
-
-	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-						RAVEN_MAX_HARDWARE_POWERLEVELS;
-
-	hwmgr->platform_descriptor.hardwarePerformanceLevels =
-						RAVEN_MAX_HARDWARE_POWERLEVELS;
-
-	hwmgr->platform_descriptor.vbiosInterruptId = 0;
-
-	hwmgr->platform_descriptor.clockStep.engineClock = 500;
-
-	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-
-	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
-	hwmgr->pstate_sclk = RAVEN_UMD_PSTATE_GFXCLK;
-	hwmgr->pstate_mclk = RAVEN_UMD_PSTATE_FCLK;
-
-	return result;
-}
-
-static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-
-	kfree(pinfo->vdd_dep_on_dcefclk);
-	pinfo->vdd_dep_on_dcefclk = NULL;
-	kfree(pinfo->vdd_dep_on_socclk);
-	pinfo->vdd_dep_on_socclk = NULL;
-	kfree(pinfo->vdd_dep_on_fclk);
-	pinfo->vdd_dep_on_fclk = NULL;
-	kfree(pinfo->vdd_dep_on_dispclk);
-	pinfo->vdd_dep_on_dispclk = NULL;
-	kfree(pinfo->vdd_dep_on_dppclk);
-	pinfo->vdd_dep_on_dppclk = NULL;
-	kfree(pinfo->vdd_dep_on_phyclk);
-	pinfo->vdd_dep_on_phyclk = NULL;
-
-	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
-	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
-
-	kfree(hwmgr->backend);
-	hwmgr->backend = NULL;
-
-	return 0;
-}
-
-static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
-				enum amd_dpm_forced_level level)
-{
-	if (hwmgr->smu_version < 0x1E3700) {
-		pr_info("smu firmware version too old, can not set dpm level\n");
-		return 0;
-	}
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinGfxClk,
-						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinFclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinSocclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinVcn,
-						RAVEN_UMD_PSTATE_VCE);
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxGfxClk,
-						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxFclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxSocclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxVcn,
-						RAVEN_UMD_PSTATE_VCE);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinGfxClk,
-						RAVEN_UMD_PSTATE_MIN_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxGfxClk,
-						RAVEN_UMD_PSTATE_MIN_GFXCLK);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinFclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxFclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_FCLK);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinGfxClk,
-						RAVEN_UMD_PSTATE_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinFclkByFreq,
-						RAVEN_UMD_PSTATE_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinSocclkByFreq,
-						RAVEN_UMD_PSTATE_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinVcn,
-						RAVEN_UMD_PSTATE_VCE);
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxGfxClk,
-						RAVEN_UMD_PSTATE_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxFclkByFreq,
-						RAVEN_UMD_PSTATE_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxSocclkByFreq,
-						RAVEN_UMD_PSTATE_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxVcn,
-						RAVEN_UMD_PSTATE_VCE);
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinGfxClk,
-						RAVEN_UMD_PSTATE_MIN_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinFclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinSocclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinVcn,
-						RAVEN_UMD_PSTATE_MIN_VCE);
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxGfxClk,
-						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxFclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxSocclkByFreq,
-						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxVcn,
-						RAVEN_UMD_PSTATE_VCE);
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinGfxClk,
-						RAVEN_UMD_PSTATE_MIN_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxGfxClk,
-						RAVEN_UMD_PSTATE_MIN_GFXCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetHardMinFclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_FCLK);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSoftMaxFclkByFreq,
-						RAVEN_UMD_PSTATE_MIN_FCLK);
-		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-	default:
-		break;
-	}
-	return 0;
-}
-
-static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct rv_hwmgr *data;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (low)
-		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
-	else
-		return data->clock_vol_info.vdd_dep_on_fclk->entries[
-			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
-}
-
-static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct rv_hwmgr *data;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (low)
-		return data->gfx_min_freq_limit;
-	else
-		return data->gfx_max_freq_limit;
-}
-
-static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-					struct pp_hw_power_state *hw_ps)
-{
-	return 0;
-}
-
-static int rv_dpm_get_pp_table_entry_callback(
-						     struct pp_hwmgr *hwmgr,
-					   struct pp_hw_power_state *hw_ps,
-							  unsigned int index,
-						     const void *clock_info)
-{
-	struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
-
-	rv_ps->levels[index].engine_clock = 0;
-
-	rv_ps->levels[index].vddc_index = 0;
-	rv_ps->level = index + 1;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		rv_ps->levels[index].ds_divider_index = 5;
-		rv_ps->levels[index].ss_divider_index = 5;
-	}
-
-	return 0;
-}
-
-static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	unsigned long ret = 0;
-
-	result = pp_tables_get_num_of_entries(hwmgr, &ret);
-
-	return result ? 0 : ret;
-}
-
-static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-		    unsigned long entry, struct pp_power_state *ps)
-{
-	int result;
-	struct rv_power_state *rv_ps;
-
-	ps->hardware.magic = PhwRaven_Magic;
-
-	rv_ps = cast_rv_ps(&(ps->hardware));
-
-	result = pp_tables_get_entry(hwmgr, entry, ps,
-			rv_dpm_get_pp_table_entry_callback);
-
-	rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
-	rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
-
-	return result;
-}
-
-static int rv_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-	return sizeof(struct rv_power_state);
-}
-
-static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr)
-{
-	return 0;
-}
-
-
-static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
-			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
-{
-	return 0;
-}
-
-static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr,
-		struct amd_pp_simple_clock_info *info)
-{
-	return -EINVAL;
-}
-
-static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	return 0;
-}
-
-static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, char *buf)
-{
-	struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
-	struct rv_voltage_dependency_table *mclk_table =
-			data->clock_vol_info.vdd_dep_on_fclk;
-	int i, now, size = 0;
-
-	switch (type) {
-	case PP_SCLK:
-		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-				PPSMC_MSG_GetGfxclkFrequency),
-				"Attempt to get current GFXCLK Failed!",
-				return -1);
-		PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
-				&now),
-				"Attempt to get current GFXCLK Failed!",
-				return -1);
-
-		size += sprintf(buf + size, "0: %uMhz %s\n",
-				data->gfx_min_freq_limit / 100,
-				((data->gfx_min_freq_limit / 100)
-				 == now) ? "*" : "");
-		size += sprintf(buf + size, "1: %uMhz %s\n",
-				data->gfx_max_freq_limit / 100,
-				((data->gfx_max_freq_limit / 100)
-				 == now) ? "*" : "");
-		break;
-	case PP_MCLK:
-		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-				PPSMC_MSG_GetFclkFrequency),
-				"Attempt to get current MEMCLK Failed!",
-				return -1);
-		PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
-				&now),
-				"Attempt to get current MEMCLK Failed!",
-				return -1);
-
-		for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i,
-					mclk_table->entries[i].clk / 100,
-					((mclk_table->entries[i].clk / 100)
-					 == now) ? "*" : "");
-		break;
-	default:
-		break;
-	}
-
-	return size;
-}
-
-static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
-				PHM_PerformanceLevelDesignation designation, uint32_t index,
-				PHM_PerformanceLevel *level)
-{
-	struct rv_hwmgr *data;
-
-	if (level == NULL || hwmgr == NULL || state == NULL)
-		return -EINVAL;
-
-	data = (struct rv_hwmgr *)(hwmgr->backend);
-
-	if (index == 0) {
-		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
-		level->coreClock = data->gfx_min_freq_limit;
-	} else {
-		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
-			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
-		level->coreClock = data->gfx_max_freq_limit;
-	}
-
-	level->nonLocalMemoryFreq = 0;
-	level->nonLocalMemoryWidth = 0;
-
-	return 0;
-}
-
-static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
-	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
-{
-	const struct rv_power_state *ps = cast_const_rv_ps(state);
-
-	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
-	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
-
-	return 0;
-}
-
-#define MEM_FREQ_LOW_LATENCY        25000
-#define MEM_FREQ_HIGH_LATENCY       80000
-#define MEM_LATENCY_HIGH            245
-#define MEM_LATENCY_LOW             35
-#define MEM_LATENCY_ERR             0xFFFF
-
-
-static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
-		uint32_t clock)
-{
-	if (clock >= MEM_FREQ_LOW_LATENCY &&
-			clock < MEM_FREQ_HIGH_LATENCY)
-		return MEM_LATENCY_HIGH;
-	else if (clock >= MEM_FREQ_HIGH_LATENCY)
-		return MEM_LATENCY_LOW;
-	else
-		return MEM_LATENCY_ERR;
-}
-
-static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
-		enum amd_pp_clock_type type,
-		struct pp_clock_levels_with_latency *clocks)
-{
-	uint32_t i;
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-	struct rv_voltage_dependency_table *pclk_vol_table;
-	bool latency_required = false;
-
-	if (pinfo == NULL)
-		return -EINVAL;
-
-	switch (type) {
-	case amd_pp_mem_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_mclk;
-		latency_required = true;
-		break;
-	case amd_pp_f_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_fclk;
-		latency_required = true;
-		break;
-	case amd_pp_dcf_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
-		break;
-	case amd_pp_disp_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
-		break;
-	case amd_pp_phy_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
-		break;
-	case amd_pp_dpp_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
-	default:
-		return -EINVAL;
-	}
-
-	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
-		return -EINVAL;
-
-	clocks->num_levels = 0;
-	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
-		clocks->data[i].latency_in_us = latency_required ?
-						rv_get_mem_latency(hwmgr,
-						pclk_vol_table->entries[i].clk) :
-						0;
-		clocks->num_levels++;
-	}
-
-	return 0;
-}
-
-static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
-		enum amd_pp_clock_type type,
-		struct pp_clock_levels_with_voltage *clocks)
-{
-	uint32_t i;
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-	struct rv_voltage_dependency_table *pclk_vol_table = NULL;
-
-	if (pinfo == NULL)
-		return -EINVAL;
-
-	switch (type) {
-	case amd_pp_mem_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_mclk;
-		break;
-	case amd_pp_f_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_fclk;
-		break;
-	case amd_pp_dcf_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
-		break;
-	case amd_pp_soc_clock:
-		pclk_vol_table = pinfo->vdd_dep_on_socclk;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
-		return -EINVAL;
-
-	clocks->num_levels = 0;
-	for (i = 0; i < pclk_vol_table->count; i++) {
-		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
-		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
-		clocks->num_levels++;
-	}
-
-	return 0;
-}
-
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
-		struct pp_display_clock_request *clock_req)
-{
-	int result = 0;
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-	enum amd_pp_clock_type clk_type = clock_req->clock_type;
-	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-	PPSMC_Msg        msg;
-
-	switch (clk_type) {
-	case amd_pp_dcf_clock:
-		if (clk_freq == rv_data->dcf_actual_hard_min_freq)
-			return 0;
-		msg =  PPSMC_MSG_SetHardMinDcefclkByFreq;
-		rv_data->dcf_actual_hard_min_freq = clk_freq;
-		break;
-	case amd_pp_soc_clock:
-		 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
-		break;
-	case amd_pp_f_clock:
-		if (clk_freq == rv_data->f_actual_hard_min_freq)
-			return 0;
-		rv_data->f_actual_hard_min_freq = clk_freq;
-		msg = PPSMC_MSG_SetHardMinFclkByFreq;
-		break;
-	default:
-		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
-		return -EINVAL;
-	}
-
-	result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
-							clk_freq);
-
-	return result;
-}
-
-static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
-{
-	clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
-	return 0;
-}
-
-static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-	uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
-	uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
-	int cur_temp =
-		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
-
-	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
-		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	else
-		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	return cur_temp;
-}
-
-static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
-			  void *value, int *size)
-{
-	uint32_t sclk, mclk;
-	int ret = 0;
-
-	switch (idx) {
-	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
-		if (!ret) {
-			rv_read_arg_from_smc(hwmgr, &sclk);
-			/* in units of 10KHZ */
-			*((uint32_t *)value) = sclk * 100;
-			*size = 4;
-		}
-		break;
-	case AMDGPU_PP_SENSOR_GFX_MCLK:
-		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
-		if (!ret) {
-			rv_read_arg_from_smc(hwmgr, &mclk);
-			/* in units of 10KHZ */
-			*((uint32_t *)value) = mclk * 100;
-			*size = 4;
-		}
-		break;
-	case AMDGPU_PP_SENSOR_GPU_TEMP:
-		*((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-static int rv_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
-{
-	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
-}
-
-static const struct pp_hwmgr_func rv_hwmgr_funcs = {
-	.backend_init = rv_hwmgr_backend_init,
-	.backend_fini = rv_hwmgr_backend_fini,
-	.asic_setup = NULL,
-	.apply_state_adjust_rules = rv_apply_state_adjust_rules,
-	.force_dpm_level = rv_dpm_force_dpm_level,
-	.get_power_state_size = rv_get_power_state_size,
-	.powerdown_uvd = NULL,
-	.powergate_uvd = NULL,
-	.powergate_vce = NULL,
-	.get_mclk = rv_dpm_get_mclk,
-	.get_sclk = rv_dpm_get_sclk,
-	.patch_boot_state = rv_dpm_patch_boot_state,
-	.get_pp_table_entry = rv_dpm_get_pp_table_entry,
-	.get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries,
-	.set_cpu_power_state = rv_set_cpu_power_state,
-	.store_cc6_data = rv_store_cc6_data,
-	.force_clock_level = rv_force_clock_level,
-	.print_clock_levels = rv_print_clock_levels,
-	.get_dal_power_level = rv_get_dal_power_level,
-	.get_performance_level = rv_get_performance_level,
-	.get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks,
-	.get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency,
-	.get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
-	.get_max_high_clocks = rv_get_max_high_clocks,
-	.read_sensor = rv_read_sensor,
-	.set_active_display_count = rv_set_active_display_count,
-	.set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
-	.dynamic_state_management_enable = rv_enable_dpm_tasks,
-	.power_off_asic = rv_power_off_asic,
-	.asic_setup = rv_setup_asic_task,
-	.power_state_set = rv_set_power_state_tasks,
-	.dynamic_state_management_disable = rv_disable_dpm_tasks,
-	.set_mmhub_powergating_by_smu = rv_set_mmhub_powergating_by_smu,
-};
-
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
-{
-	hwmgr->hwmgr_func = &rv_hwmgr_funcs;
-	hwmgr->pptable_func = &pptable_funcs;
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
deleted file mode 100644
index c3bc311..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef RAVEN_HWMGR_H
-#define RAVEN_HWMGR_H
-
-#include "hwmgr.h"
-#include "rv_inc.h"
-#include "smu10_driver_if.h"
-#include "rv_ppsmc.h"
-
-
-#define RAVEN_MAX_HARDWARE_POWERLEVELS               8
-#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS   15
-
-#define DPMFlags_SCLK_Enabled                     0x00000001
-#define DPMFlags_UVD_Enabled                      0x00000002
-#define DPMFlags_VCE_Enabled                      0x00000004
-#define DPMFlags_ACP_Enabled                      0x00000008
-#define DPMFlags_ForceHighestValid                0x40000000
-
-/* Do not change the following, it is also defined in SMU8.h */
-#define SMU_EnabledFeatureScoreboard_AcpDpmOn     0x00000001
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn    0x00200000
-#define SMU_EnabledFeatureScoreboard_UvdDpmOn     0x01000000
-#define SMU_EnabledFeatureScoreboard_VceDpmOn     0x02000000
-
-#define SMU_PHYID_SHIFT      8
-
-#define RAVEN_PCIE_POWERGATING_TARGET_GFX            0
-#define RAVEN_PCIE_POWERGATING_TARGET_DDI            1
-#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE     2
-#define RAVEN_PCIE_POWERGATING_TARGET_PHY            3
-
-enum VQ_TYPE {
-	CLOCK_TYPE_DCLK = 0L,
-	CLOCK_TYPE_ECLK,
-	CLOCK_TYPE_SCLK,
-	CLOCK_TYPE_CCLK,
-	VQ_GFX_CU
-};
-
-#define SUSTAINABLE_SCLK_MASK  0x00ffffff
-#define SUSTAINABLE_SCLK_SHIFT 0
-#define SUSTAINABLE_CU_MASK    0xff000000
-#define SUSTAINABLE_CU_SHIFT   24
-
-struct rv_dpm_entry {
-	uint32_t soft_min_clk;
-	uint32_t hard_min_clk;
-	uint32_t soft_max_clk;
-	uint32_t hard_max_clk;
-};
-
-struct rv_power_level {
-	uint32_t engine_clock;
-	uint8_t vddc_index;
-	uint8_t ds_divider_index;
-	uint8_t ss_divider_index;
-	uint8_t allow_gnb_slow;
-	uint8_t force_nbp_state;
-	uint8_t display_wm;
-	uint8_t vce_wm;
-	uint8_t num_simd_to_powerdown;
-	uint8_t hysteresis_up;
-	uint8_t rsv[3];
-};
-
-/*used for the nbpsFlags field in rv_power state*/
-#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
-
-#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE    (1<<0)
-
-struct rv_uvd_clocks {
-	uint32_t vclk;
-	uint32_t dclk;
-	uint32_t vclk_low_divider;
-	uint32_t vclk_high_divider;
-	uint32_t dclk_low_divider;
-	uint32_t dclk_high_divider;
-};
-
-struct pp_disable_nbpslo_flags {
-	union {
-		struct {
-			uint32_t entry : 1;
-			uint32_t display : 1;
-			uint32_t driver: 1;
-			uint32_t vce : 1;
-			uint32_t uvd : 1;
-			uint32_t acp : 1;
-			uint32_t reserved: 26;
-		} bits;
-		uint32_t u32All;
-	};
-};
-
-
-enum rv_pstate_previous_action {
-	DO_NOTHING = 1,
-	FORCE_HIGH,
-	CANCEL_FORCE_HIGH
-};
-
-struct rv_power_state {
-	unsigned int magic;
-	uint32_t level;
-	struct rv_uvd_clocks uvd_clocks;
-	uint32_t evclk;
-	uint32_t ecclk;
-	uint32_t samclk;
-	uint32_t acpclk;
-	bool need_dfs_bypass;
-
-	uint32_t nbps_flags;
-	uint32_t bapm_flags;
-	uint8_t dpm0_pg_nbps_low;
-	uint8_t dpm0_pg_nbps_high;
-	uint8_t dpm_x_nbps_low;
-	uint8_t dpm_x_nbps_high;
-
-	enum rv_pstate_previous_action action;
-
-	struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS];
-	struct pp_disable_nbpslo_flags nbpslo_flags;
-};
-
-#define RAVEN_NUM_NBPSTATES        4
-#define RAVEN_NUM_NBPMEMORYCLOCK   2
-
-
-struct rv_display_phy_info_entry {
-	uint8_t                   phy_present;
-	uint8_t                   active_lane_mapping;
-	uint8_t                   display_config_type;
-	uint8_t                   active_num_of_lanes;
-};
-
-#define RAVEN_MAX_DISPLAYPHY_IDS       10
-
-struct rv_display_phy_info {
-	bool                         display_phy_access_initialized;
-	struct rv_display_phy_info_entry  entries[RAVEN_MAX_DISPLAYPHY_IDS];
-};
-
-#define MAX_DISPLAY_CLOCK_LEVEL 8
-
-struct rv_system_info{
-	uint8_t                      htc_tmp_lmt;
-	uint8_t                      htc_hyst_lmt;
-};
-
-#define MAX_REGULAR_DPM_NUMBER 8
-
-struct rv_mclk_latency_entries {
-	uint32_t  frequency;
-	uint32_t  latency;
-};
-
-struct rv_mclk_latency_table {
-	uint32_t  count;
-	struct rv_mclk_latency_entries  entries[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct rv_clock_voltage_dependency_record {
-	uint32_t clk;
-	uint32_t vol;
-};
-
-
-struct rv_voltage_dependency_table {
-	uint32_t count;
-	struct rv_clock_voltage_dependency_record entries[1];
-};
-
-struct rv_clock_voltage_information {
-	struct rv_voltage_dependency_table    *vdd_dep_on_dcefclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_socclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_fclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_mclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_dispclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_dppclk;
-	struct rv_voltage_dependency_table    *vdd_dep_on_phyclk;
-};
-
-struct rv_hwmgr {
-	uint32_t disable_driver_thermal_policy;
-	uint32_t thermal_auto_throttling_treshold;
-	struct rv_system_info sys_info;
-	struct rv_mclk_latency_table mclk_latency_table;
-
-	uint32_t ddi_power_gating_disabled;
-
-	struct rv_display_phy_info_entry            display_phy_info;
-	uint32_t dce_slow_sclk_threshold;
-
-	bool disp_clk_bypass;
-	bool disp_clk_bypass_pending;
-	uint32_t bapm_enabled;
-
-	bool video_start;
-	bool battery_state;
-
-	uint32_t is_nb_dpm_enabled;
-	uint32_t is_voltage_island_enabled;
-	uint32_t disable_smu_acp_s3_handshake;
-	uint32_t disable_notify_smu_vpu_recovery;
-	bool                           in_vpu_recovery;
-	bool pg_acp_init;
-	uint8_t disp_config;
-
-	/* PowerTune */
-	uint32_t power_containment_features;
-	bool cac_enabled;
-	bool disable_uvd_power_tune_feature;
-	bool enable_bapm_feature;
-	bool enable_tdc_limit_feature;
-
-
-	/* SMC SRAM Address of firmware header tables */
-	uint32_t sram_end;
-	uint32_t dpm_table_start;
-	uint32_t soft_regs_start;
-
-	/* start of SMU7_Fusion_DpmTable */
-
-	uint8_t uvd_level_count;
-	uint8_t vce_level_count;
-	uint8_t acp_level_count;
-	uint8_t samu_level_count;
-
-	uint32_t fps_high_threshold;
-	uint32_t fps_low_threshold;
-
-	uint32_t dpm_flags;
-	struct rv_dpm_entry sclk_dpm;
-	struct rv_dpm_entry uvd_dpm;
-	struct rv_dpm_entry vce_dpm;
-	struct rv_dpm_entry acp_dpm;
-	bool acp_power_up_no_dsp;
-
-	uint32_t max_sclk_level;
-	uint32_t num_of_clk_entries;
-
-	/* CPU Power State */
-	uint32_t                          separation_time;
-	bool                              cc6_disable;
-	bool                              pstate_disable;
-	bool                              cc6_setting_changed;
-
-	uint32_t                             ulTotalActiveCUs;
-
-	bool                           isp_tileA_power_gated;
-	bool                           isp_tileB_power_gated;
-	uint32_t                       isp_actual_hard_min_freq;
-	uint32_t                       soc_actual_hard_min_freq;
-	uint32_t                       dcf_actual_hard_min_freq;
-
-	uint32_t                        f_actual_hard_min_freq;
-	uint32_t                        fabric_actual_soft_min_freq;
-	uint32_t                        vclk_soft_min;
-	uint32_t                        dclk_soft_min;
-	uint32_t                        gfx_actual_soft_min_freq;
-	uint32_t                        gfx_min_freq_limit;
-	uint32_t                        gfx_max_freq_limit;
-
-	bool                           vcn_power_gated;
-	bool                           vcn_dpg_mode;
-
-	bool                           gfx_off_controled_by_driver;
-	Watermarks_t                      water_marks_table;
-	struct rv_clock_voltage_information   clock_vol_info;
-	DpmClocks_t                       clock_table;
-
-	uint32_t active_process_mask;
-	bool need_min_deep_sleep_dcefclk;
-	uint32_t                             deep_sleep_dcefclk;
-	uint32_t                             num_active_display;
-};
-
-struct pp_hwmgr;
-
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
-
-/* UMD PState Raven Msg Parameters in MHz */
-#define RAVEN_UMD_PSTATE_GFXCLK                 700
-#define RAVEN_UMD_PSTATE_SOCCLK                 626
-#define RAVEN_UMD_PSTATE_FCLK                   933
-#define RAVEN_UMD_PSTATE_VCE                    0x03C00320
-
-#define RAVEN_UMD_PSTATE_PEAK_GFXCLK            1100
-#define RAVEN_UMD_PSTATE_PEAK_SOCCLK            757
-#define RAVEN_UMD_PSTATE_PEAK_FCLK              1200
-
-#define RAVEN_UMD_PSTATE_MIN_GFXCLK             200
-#define RAVEN_UMD_PSTATE_MIN_FCLK               400
-#define RAVEN_UMD_PSTATE_MIN_SOCCLK             200
-#define RAVEN_UMD_PSTATE_MIN_VCE                0x0190012C
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
new file mode 100644
index 0000000..350dde9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -0,0 +1,1076 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "atom-types.h"
+#include "atombios.h"
+#include "processpptables.h"
+#include "cgs_common.h"
+#include "smumgr.h"
+#include "hwmgr.h"
+#include "hardwaremanager.h"
+#include "rv_ppsmc.h"
+#include "smu10_hwmgr.h"
+#include "power_state.h"
+#include "smu10_smumgr.h"
+#include "pp_soc15.h"
+
+
+#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID     5
+#define RAVEN_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
+#define SCLK_MIN_DIV_INTV_SHIFT         12
+#define RAVEN_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
+#define SMC_RAM_END                     0x40000
+
+static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
+
+
+int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+		struct pp_display_clock_request *clock_req);
+
+
+static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
+{
+	if (PhwRaven_Magic != hw_ps->magic)
+		return NULL;
+
+	return (struct rv_power_state *)hw_ps;
+}
+
+static const struct rv_power_state *cast_const_rv_ps(
+				const struct pp_hw_power_state *hw_ps)
+{
+	if (PhwRaven_Magic != hw_ps->magic)
+		return NULL;
+
+	return (struct rv_power_state *)hw_ps;
+}
+
+static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
+
+	rv_hwmgr->dce_slow_sclk_threshold = 30000;
+	rv_hwmgr->thermal_auto_throttling_treshold = 0;
+	rv_hwmgr->is_nb_dpm_enabled = 1;
+	rv_hwmgr->dpm_flags = 1;
+	rv_hwmgr->gfx_off_controled_by_driver = false;
+	rv_hwmgr->need_min_deep_sleep_dcefclk = true;
+	rv_hwmgr->num_active_display = 0;
+	rv_hwmgr->deep_sleep_dcefclk = 0;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SclkDeepSleep);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_SclkThrottleLowNotification);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_PowerPlaySupport);
+	return 0;
+}
+
+static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+			struct phm_clock_and_voltage_limits *table)
+{
+	return 0;
+}
+
+static int rv_init_dynamic_state_adjustment_rule_settings(
+							struct pp_hwmgr *hwmgr)
+{
+	uint32_t table_size =
+		sizeof(struct phm_clock_voltage_dependency_table) +
+		(7 * sizeof(struct phm_clock_voltage_dependency_record));
+
+	struct phm_clock_voltage_dependency_table *table_clk_vlt =
+					kzalloc(table_size, GFP_KERNEL);
+
+	if (NULL == table_clk_vlt) {
+		pr_err("Can not allocate memory!\n");
+		return -ENOMEM;
+	}
+
+	table_clk_vlt->count = 8;
+	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
+	table_clk_vlt->entries[0].v = 0;
+	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
+	table_clk_vlt->entries[1].v = 1;
+	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
+	table_clk_vlt->entries[2].v = 2;
+	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
+	table_clk_vlt->entries[3].v = 3;
+	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
+	table_clk_vlt->entries[4].v = 4;
+	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
+	table_clk_vlt->entries[5].v = 5;
+	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
+	table_clk_vlt->entries[6].v = 6;
+	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
+	table_clk_vlt->entries[7].v = 7;
+	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+
+	return 0;
+}
+
+static int rv_get_system_info_data(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend;
+
+	rv_data->sys_info.htc_hyst_lmt = 5;
+	rv_data->sys_info.htc_tmp_lmt = 203;
+
+	if (rv_data->thermal_auto_throttling_treshold == 0)
+		 rv_data->thermal_auto_throttling_treshold = 203;
+
+	rv_construct_max_power_limits_table (hwmgr,
+				    &hwmgr->dyn_state.max_clock_voltage_on_ac);
+
+	rv_init_dynamic_state_adjustment_rule_settings(hwmgr);
+
+	return 0;
+}
+
+static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
+{
+	struct PP_Clocks clocks = {0};
+	struct pp_display_clock_request clock_req;
+
+	clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+	clock_req.clock_type = amd_pp_dcf_clock;
+	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
+
+	PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
+				"Attempt to set DCF Clock Failed!", return -EINVAL);
+
+	return 0;
+}
+
+static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
+		rv_data->deep_sleep_dcefclk = clock/100;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetMinDeepSleepDcefclk,
+					rv_data->deep_sleep_dcefclk);
+	}
+	return 0;
+}
+
+static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (rv_data->num_active_display != count) {
+		rv_data->num_active_display = count;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetDisplayCount,
+				rv_data->num_active_display);
+	}
+
+	return 0;
+}
+
+static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+	return rv_set_clock_limit(hwmgr, input);
+}
+
+static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	rv_data->vcn_power_gated = true;
+	rv_data->isp_tileA_power_gated = true;
+	rv_data->isp_tileB_power_gated = true;
+
+	return 0;
+}
+
+
+static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+	return rv_init_power_gate_state(hwmgr);
+}
+
+static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	rv_data->separation_time = 0;
+	rv_data->cc6_disable = false;
+	rv_data->pstate_disable = false;
+	rv_data->cc6_setting_changed = false;
+
+	return 0;
+}
+
+static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+	return rv_reset_cc6_data(hwmgr);
+}
+
+static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (rv_data->gfx_off_controled_by_driver)
+		smum_send_msg_to_smc(hwmgr,
+						PPSMC_MSG_DisableGfxOff);
+
+	return 0;
+}
+
+static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	return rv_disable_gfx_off(hwmgr);
+}
+
+static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (rv_data->gfx_off_controled_by_driver)
+		smum_send_msg_to_smc(hwmgr,
+						PPSMC_MSG_EnableGfxOff);
+
+	return 0;
+}
+
+static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	return rv_enable_gfx_off(hwmgr);
+}
+
+static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+				struct pp_power_state  *prequest_ps,
+			const struct pp_power_state *pcurrent_ps)
+{
+	return 0;
+}
+
+/* temporary hardcoded clock voltage breakdown tables */
+static const DpmClock_t VddDcfClk[]= {
+	{ 300, 2600},
+	{ 600, 3200},
+	{ 600, 3600},
+};
+
+static const DpmClock_t VddSocClk[]= {
+	{ 478, 2600},
+	{ 722, 3200},
+	{ 722, 3600},
+};
+
+static const DpmClock_t VddFClk[]= {
+	{ 400, 2600},
+	{1200, 3200},
+	{1200, 3600},
+};
+
+static const DpmClock_t VddDispClk[]= {
+	{ 435, 2600},
+	{ 661, 3200},
+	{1086, 3600},
+};
+
+static const DpmClock_t VddDppClk[]= {
+	{ 435, 2600},
+	{ 661, 3200},
+	{ 661, 3600},
+};
+
+static const DpmClock_t VddPhyClk[]= {
+	{ 540, 2600},
+	{ 810, 3200},
+	{ 810, 3600},
+};
+
+static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
+			struct rv_voltage_dependency_table **pptable,
+			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
+{
+	uint32_t table_size, i;
+	struct rv_voltage_dependency_table *ptable;
+
+	table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry;
+	ptable = kzalloc(table_size, GFP_KERNEL);
+
+	if (NULL == ptable)
+		return -ENOMEM;
+
+	ptable->count = num_entry;
+
+	for (i = 0; i < ptable->count; i++) {
+		ptable->entries[i].clk         = pclk_dependency_table->Freq * 100;
+		ptable->entries[i].vol         = pclk_dependency_table->Vol;
+		pclk_dependency_table++;
+	}
+
+	*pptable = ptable;
+
+	return 0;
+}
+
+
+static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+	DpmClocks_t  *table = &(rv_data->clock_table);
+	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+
+	result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Attempt to copy clock table from smc failed",
+			return result);
+
+	if (0 == result && table->DcefClocks[0].Freq != 0) {
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+						NUM_DCEFCLK_DPM_LEVELS,
+						&rv_data->clock_table.DcefClocks[0]);
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+						NUM_SOCCLK_DPM_LEVELS,
+						&rv_data->clock_table.SocClocks[0]);
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+						NUM_FCLK_DPM_LEVELS,
+						&rv_data->clock_table.FClocks[0]);
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
+						NUM_MEMCLK_DPM_LEVELS,
+						&rv_data->clock_table.MemClocks[0]);
+	} else {
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+						ARRAY_SIZE(VddDcfClk),
+						&VddDcfClk[0]);
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+						ARRAY_SIZE(VddSocClk),
+						&VddSocClk[0]);
+		rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+						ARRAY_SIZE(VddFClk),
+						&VddFClk[0]);
+	}
+	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
+					ARRAY_SIZE(VddDispClk),
+					&VddDispClk[0]);
+	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
+					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
+	rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
+					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
+
+	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+			PPSMC_MSG_GetMinGfxclkFrequency),
+			"Attempt to get min GFXCLK Failed!",
+			return -1);
+	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+			&result),
+			"Attempt to get min GFXCLK Failed!",
+			return -1);
+	rv_data->gfx_min_freq_limit = result * 100;
+
+	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+			PPSMC_MSG_GetMaxGfxclkFrequency),
+			"Attempt to get max GFXCLK Failed!",
+			return -1);
+	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+			&result),
+			"Attempt to get max GFXCLK Failed!",
+			return -1);
+	rv_data->gfx_max_freq_limit = result * 100;
+
+	return 0;
+}
+
+static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct rv_hwmgr *data;
+
+	data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+
+	result = rv_initialize_dpm_defaults(hwmgr);
+	if (result != 0) {
+		pr_err("rv_initialize_dpm_defaults failed\n");
+		return result;
+	}
+
+	rv_populate_clock_table(hwmgr);
+
+	result = rv_get_system_info_data(hwmgr);
+	if (result != 0) {
+		pr_err("rv_get_system_info_data failed\n");
+		return result;
+	}
+
+	rv_construct_boot_state(hwmgr);
+
+	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+						RAVEN_MAX_HARDWARE_POWERLEVELS;
+
+	hwmgr->platform_descriptor.hardwarePerformanceLevels =
+						RAVEN_MAX_HARDWARE_POWERLEVELS;
+
+	hwmgr->platform_descriptor.vbiosInterruptId = 0;
+
+	hwmgr->platform_descriptor.clockStep.engineClock = 500;
+
+	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+
+	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+	hwmgr->pstate_sclk = RAVEN_UMD_PSTATE_GFXCLK;
+	hwmgr->pstate_mclk = RAVEN_UMD_PSTATE_FCLK;
+
+	return result;
+}
+
+static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+
+	kfree(pinfo->vdd_dep_on_dcefclk);
+	pinfo->vdd_dep_on_dcefclk = NULL;
+	kfree(pinfo->vdd_dep_on_socclk);
+	pinfo->vdd_dep_on_socclk = NULL;
+	kfree(pinfo->vdd_dep_on_fclk);
+	pinfo->vdd_dep_on_fclk = NULL;
+	kfree(pinfo->vdd_dep_on_dispclk);
+	pinfo->vdd_dep_on_dispclk = NULL;
+	kfree(pinfo->vdd_dep_on_dppclk);
+	pinfo->vdd_dep_on_dppclk = NULL;
+	kfree(pinfo->vdd_dep_on_phyclk);
+	pinfo->vdd_dep_on_phyclk = NULL;
+
+	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+
+	kfree(hwmgr->backend);
+	hwmgr->backend = NULL;
+
+	return 0;
+}
+
+static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+				enum amd_dpm_forced_level level)
+{
+	if (hwmgr->smu_version < 0x1E3700) {
+		pr_info("smu firmware version too old, can not set dpm level\n");
+		return 0;
+	}
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinSocclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinVcn,
+						RAVEN_UMD_PSTATE_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxSocclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxVcn,
+						RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_FCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						RAVEN_UMD_PSTATE_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						RAVEN_UMD_PSTATE_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinSocclkByFreq,
+						RAVEN_UMD_PSTATE_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinVcn,
+						RAVEN_UMD_PSTATE_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						RAVEN_UMD_PSTATE_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						RAVEN_UMD_PSTATE_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxSocclkByFreq,
+						RAVEN_UMD_PSTATE_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxVcn,
+						RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinSocclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinVcn,
+						RAVEN_UMD_PSTATE_MIN_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxSocclkByFreq,
+						RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxVcn,
+						RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						RAVEN_UMD_PSTATE_MIN_FCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+	return 0;
+}
+
+static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct rv_hwmgr *data;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (low)
+		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+	else
+		return data->clock_vol_info.vdd_dep_on_fclk->entries[
+			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+}
+
+static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct rv_hwmgr *data;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (low)
+		return data->gfx_min_freq_limit;
+	else
+		return data->gfx_max_freq_limit;
+}
+
+static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *hw_ps)
+{
+	return 0;
+}
+
+static int rv_dpm_get_pp_table_entry_callback(
+						     struct pp_hwmgr *hwmgr,
+					   struct pp_hw_power_state *hw_ps,
+							  unsigned int index,
+						     const void *clock_info)
+{
+	struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
+
+	rv_ps->levels[index].engine_clock = 0;
+
+	rv_ps->levels[index].vddc_index = 0;
+	rv_ps->level = index + 1;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		rv_ps->levels[index].ds_divider_index = 5;
+		rv_ps->levels[index].ss_divider_index = 5;
+	}
+
+	return 0;
+}
+
+static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	unsigned long ret = 0;
+
+	result = pp_tables_get_num_of_entries(hwmgr, &ret);
+
+	return result ? 0 : ret;
+}
+
+static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+		    unsigned long entry, struct pp_power_state *ps)
+{
+	int result;
+	struct rv_power_state *rv_ps;
+
+	ps->hardware.magic = PhwRaven_Magic;
+
+	rv_ps = cast_rv_ps(&(ps->hardware));
+
+	result = pp_tables_get_entry(hwmgr, entry, ps,
+			rv_dpm_get_pp_table_entry_callback);
+
+	rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+	rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+
+	return result;
+}
+
+static int rv_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+	return sizeof(struct rv_power_state);
+}
+
+static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+
+static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
+{
+	return 0;
+}
+
+static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr,
+		struct amd_pp_simple_clock_info *info)
+{
+	return -EINVAL;
+}
+
+static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	return 0;
+}
+
+static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
+	struct rv_voltage_dependency_table *mclk_table =
+			data->clock_vol_info.vdd_dep_on_fclk;
+	int i, now, size = 0;
+
+	switch (type) {
+	case PP_SCLK:
+		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+				PPSMC_MSG_GetGfxclkFrequency),
+				"Attempt to get current GFXCLK Failed!",
+				return -1);
+		PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+				&now),
+				"Attempt to get current GFXCLK Failed!",
+				return -1);
+
+		size += sprintf(buf + size, "0: %uMhz %s\n",
+				data->gfx_min_freq_limit / 100,
+				((data->gfx_min_freq_limit / 100)
+				 == now) ? "*" : "");
+		size += sprintf(buf + size, "1: %uMhz %s\n",
+				data->gfx_max_freq_limit / 100,
+				((data->gfx_max_freq_limit / 100)
+				 == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+				PPSMC_MSG_GetFclkFrequency),
+				"Attempt to get current MEMCLK Failed!",
+				return -1);
+		PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+				&now),
+				"Attempt to get current MEMCLK Failed!",
+				return -1);
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i,
+					mclk_table->entries[i].clk / 100,
+					((mclk_table->entries[i].clk / 100)
+					 == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+
+	return size;
+}
+
+static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	struct rv_hwmgr *data;
+
+	if (level == NULL || hwmgr == NULL || state == NULL)
+		return -EINVAL;
+
+	data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (index == 0) {
+		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+		level->coreClock = data->gfx_min_freq_limit;
+	} else {
+		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
+			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+		level->coreClock = data->gfx_max_freq_limit;
+	}
+
+	level->nonLocalMemoryFreq = 0;
+	level->nonLocalMemoryWidth = 0;
+
+	return 0;
+}
+
/*
 * Derive the shallow-sleep engine clock range from the power state:
 * each level's engine clock divided by 2^ss_divider_index, using the
 * first level as min and the last level as max.
 */
static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct rv_power_state *ps = cast_const_rv_ps(state);

	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));

	return 0;
}
+
+#define MEM_FREQ_LOW_LATENCY        25000
+#define MEM_FREQ_HIGH_LATENCY       80000
+#define MEM_LATENCY_HIGH            245
+#define MEM_LATENCY_LOW             35
+#define MEM_LATENCY_ERR             0xFFFF
+
+
+static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
+		uint32_t clock)
+{
+	if (clock >= MEM_FREQ_LOW_LATENCY &&
+			clock < MEM_FREQ_HIGH_LATENCY)
+		return MEM_LATENCY_HIGH;
+	else if (clock >= MEM_FREQ_HIGH_LATENCY)
+		return MEM_LATENCY_LOW;
+	else
+		return MEM_LATENCY_ERR;
+}
+
+static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+		enum amd_pp_clock_type type,
+		struct pp_clock_levels_with_latency *clocks)
+{
+	uint32_t i;
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+	struct rv_voltage_dependency_table *pclk_vol_table;
+	bool latency_required = false;
+
+	if (pinfo == NULL)
+		return -EINVAL;
+
+	switch (type) {
+	case amd_pp_mem_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_mclk;
+		latency_required = true;
+		break;
+	case amd_pp_f_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_fclk;
+		latency_required = true;
+		break;
+	case amd_pp_dcf_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
+		break;
+	case amd_pp_disp_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
+		break;
+	case amd_pp_phy_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
+		break;
+	case amd_pp_dpp_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+	default:
+		return -EINVAL;
+	}
+
+	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
+		return -EINVAL;
+
+	clocks->num_levels = 0;
+	for (i = 0; i < pclk_vol_table->count; i++) {
+		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+		clocks->data[i].latency_in_us = latency_required ?
+						rv_get_mem_latency(hwmgr,
+						pclk_vol_table->entries[i].clk) :
+						0;
+		clocks->num_levels++;
+	}
+
+	return 0;
+}
+
+static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+		enum amd_pp_clock_type type,
+		struct pp_clock_levels_with_voltage *clocks)
+{
+	uint32_t i;
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+	struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+	struct rv_voltage_dependency_table *pclk_vol_table = NULL;
+
+	if (pinfo == NULL)
+		return -EINVAL;
+
+	switch (type) {
+	case amd_pp_mem_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_mclk;
+		break;
+	case amd_pp_f_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_fclk;
+		break;
+	case amd_pp_dcf_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
+		break;
+	case amd_pp_soc_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_socclk;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
+		return -EINVAL;
+
+	clocks->num_levels = 0;
+	for (i = 0; i < pclk_vol_table->count; i++) {
+		clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+		clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
+		clocks->num_levels++;
+	}
+
+	return 0;
+}
+
+int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+		struct pp_display_clock_request *clock_req)
+{
+	int result = 0;
+	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+	enum amd_pp_clock_type clk_type = clock_req->clock_type;
+	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+	PPSMC_Msg        msg;
+
+	switch (clk_type) {
+	case amd_pp_dcf_clock:
+		if (clk_freq == rv_data->dcf_actual_hard_min_freq)
+			return 0;
+		msg =  PPSMC_MSG_SetHardMinDcefclkByFreq;
+		rv_data->dcf_actual_hard_min_freq = clk_freq;
+		break;
+	case amd_pp_soc_clock:
+		 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
+		break;
+	case amd_pp_f_clock:
+		if (clk_freq == rv_data->f_actual_hard_min_freq)
+			return 0;
+		rv_data->f_actual_hard_min_freq = clk_freq;
+		msg = PPSMC_MSG_SetHardMinFclkByFreq;
+		break;
+	default:
+		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
+		return -EINVAL;
+	}
+
+	result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
+							clk_freq);
+
+	return result;
+}
+
/* Report the max engine clock in 10 kHz units (hard-coded, see below). */
static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
	return 0;
}
+
+static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
+			mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
+	uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
+	int cur_temp =
+		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
+
+	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
+		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else
+		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return cur_temp;
+}
+
+static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+			  void *value, int *size)
+{
+	uint32_t sclk, mclk;
+	int ret = 0;
+
+	switch (idx) {
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+		if (!ret) {
+			rv_read_arg_from_smc(hwmgr, &sclk);
+			/* in units of 10KHZ */
+			*((uint32_t *)value) = sclk * 100;
+			*size = 4;
+		}
+		break;
+	case AMDGPU_PP_SENSOR_GFX_MCLK:
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+		if (!ret) {
+			rv_read_arg_from_smc(hwmgr, &mclk);
+			/* in units of 10KHZ */
+			*((uint32_t *)value) = mclk * 100;
+			*size = 4;
+		}
+		break;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
/* Ask the SMU to power-gate the MMHUB block. */
static int rv_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
+
+static const struct pp_hwmgr_func rv_hwmgr_funcs = {
+	.backend_init = rv_hwmgr_backend_init,
+	.backend_fini = rv_hwmgr_backend_fini,
+	.asic_setup = NULL,
+	.apply_state_adjust_rules = rv_apply_state_adjust_rules,
+	.force_dpm_level = rv_dpm_force_dpm_level,
+	.get_power_state_size = rv_get_power_state_size,
+	.powerdown_uvd = NULL,
+	.powergate_uvd = NULL,
+	.powergate_vce = NULL,
+	.get_mclk = rv_dpm_get_mclk,
+	.get_sclk = rv_dpm_get_sclk,
+	.patch_boot_state = rv_dpm_patch_boot_state,
+	.get_pp_table_entry = rv_dpm_get_pp_table_entry,
+	.get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries,
+	.set_cpu_power_state = rv_set_cpu_power_state,
+	.store_cc6_data = rv_store_cc6_data,
+	.force_clock_level = rv_force_clock_level,
+	.print_clock_levels = rv_print_clock_levels,
+	.get_dal_power_level = rv_get_dal_power_level,
+	.get_performance_level = rv_get_performance_level,
+	.get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks,
+	.get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency,
+	.get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
+	.get_max_high_clocks = rv_get_max_high_clocks,
+	.read_sensor = rv_read_sensor,
+	.set_active_display_count = rv_set_active_display_count,
+	.set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
+	.dynamic_state_management_enable = rv_enable_dpm_tasks,
+	.power_off_asic = rv_power_off_asic,
+	.asic_setup = rv_setup_asic_task,
+	.power_state_set = rv_set_power_state_tasks,
+	.dynamic_state_management_disable = rv_disable_dpm_tasks,
+	.set_mmhub_powergating_by_smu = rv_set_mmhub_powergating_by_smu,
+};
+
/* Entry point: install the Raven hwmgr and pptable function tables. */
int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &rv_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
new file mode 100644
index 0000000..c3bc311
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#ifndef RAVEN_HWMGR_H
#define RAVEN_HWMGR_H

#include "hwmgr.h"
#include "rv_inc.h"
#include "smu10_driver_if.h"
#include "rv_ppsmc.h"


/* Number of entries in rv_power_state::levels */
#define RAVEN_MAX_HARDWARE_POWERLEVELS               8
#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS   15

/* DPM feature flag bits (see rv_hwmgr::dpm_flags) */
#define DPMFlags_SCLK_Enabled                     0x00000001
#define DPMFlags_UVD_Enabled                      0x00000002
#define DPMFlags_VCE_Enabled                      0x00000004
#define DPMFlags_ACP_Enabled                      0x00000008
#define DPMFlags_ForceHighestValid                0x40000000

/* Do not change the following, it is also defined in SMU8.h */
#define SMU_EnabledFeatureScoreboard_AcpDpmOn     0x00000001
#define SMU_EnabledFeatureScoreboard_SclkDpmOn    0x00200000
#define SMU_EnabledFeatureScoreboard_UvdDpmOn     0x01000000
#define SMU_EnabledFeatureScoreboard_VceDpmOn     0x02000000

#define SMU_PHYID_SHIFT      8

/* PCIe power-gating target identifiers */
#define RAVEN_PCIE_POWERGATING_TARGET_GFX            0
#define RAVEN_PCIE_POWERGATING_TARGET_DDI            1
#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE     2
#define RAVEN_PCIE_POWERGATING_TARGET_PHY            3
+
/* Clock domains plus a gfx-CU selector (VQ = voltage query, presumably). */
enum VQ_TYPE {
	CLOCK_TYPE_DCLK = 0L,
	CLOCK_TYPE_ECLK,
	CLOCK_TYPE_SCLK,
	CLOCK_TYPE_CCLK,
	VQ_GFX_CU
};

/* Packing of sustainable sclk (low 24 bits) and CU count (high 8 bits) */
#define SUSTAINABLE_SCLK_MASK  0x00ffffff
#define SUSTAINABLE_SCLK_SHIFT 0
#define SUSTAINABLE_CU_MASK    0xff000000
#define SUSTAINABLE_CU_SHIFT   24
+
/* Soft/hard clock limits tracked per DPM domain (sclk/uvd/vce/acp). */
struct rv_dpm_entry {
	uint32_t soft_min_clk;
	uint32_t hard_min_clk;
	uint32_t soft_max_clk;
	uint32_t hard_max_clk;
};
+
/* One engine-clock performance level within a rv_power_state. */
struct rv_power_level {
	uint32_t engine_clock;
	uint8_t vddc_index;
	uint8_t ds_divider_index;	/* deep-sleep clock divider selector */
	uint8_t ss_divider_index;	/* shallow-sleep clock divider selector */
	uint8_t allow_gnb_slow;
	uint8_t force_nbp_state;
	uint8_t display_wm;		/* display watermark selector */
	uint8_t vce_wm;			/* VCE watermark selector */
	uint8_t num_simd_to_powerdown;
	uint8_t hysteresis_up;
	uint8_t rsv[3];			/* pad to 32-bit alignment */
};
+
/* Flag bits for the nbps_flags field in struct rv_power_state */
#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)

/* Flag bit for the bapm_flags field in struct rv_power_state */
#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE    (1<<0)

/* UVD video/display clocks and their low/high divider selectors. */
struct rv_uvd_clocks {
	uint32_t vclk;
	uint32_t dclk;
	uint32_t vclk_low_divider;
	uint32_t vclk_high_divider;
	uint32_t dclk_low_divider;
	uint32_t dclk_high_divider;
};
+
/* Per-client "disable NB P-state low" request bits, also addressable
 * as a single 32-bit word via u32All. */
struct pp_disable_nbpslo_flags {
	union {
		struct {
			uint32_t entry : 1;
			uint32_t display : 1;
			uint32_t driver: 1;
			uint32_t vce : 1;
			uint32_t uvd : 1;
			uint32_t acp : 1;
			uint32_t reserved: 26;
		} bits;
		uint32_t u32All;
	};
};
+
+
/* Last P-state forcing action taken for a power state. */
enum rv_pstate_previous_action {
	DO_NOTHING = 1,
	FORCE_HIGH,
	CANCEL_FORCE_HIGH
};
+
/* Raven hardware power state, embedded behind pp_hw_power_state and
 * recovered via cast_rv_ps() after checking the magic tag. */
struct rv_power_state {
	unsigned int magic;		/* PhwRaven_Magic validity tag */
	uint32_t level;			/* number of valid entries in levels[] */
	struct rv_uvd_clocks uvd_clocks;
	uint32_t evclk;
	uint32_t ecclk;
	uint32_t samclk;
	uint32_t acpclk;
	bool need_dfs_bypass;

	uint32_t nbps_flags;		/* RAVEN_POWERSTATE_FLAGS_NBPS_* */
	uint32_t bapm_flags;		/* RAVEN_POWERSTATE_FLAGS_BAPM_* */
	uint8_t dpm0_pg_nbps_low;
	uint8_t dpm0_pg_nbps_high;
	uint8_t dpm_x_nbps_low;
	uint8_t dpm_x_nbps_high;

	enum rv_pstate_previous_action action;

	struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS];
	struct pp_disable_nbpslo_flags nbpslo_flags;
};
+
#define RAVEN_NUM_NBPSTATES        4
#define RAVEN_NUM_NBPMEMORYCLOCK   2


/* Per-PHY display configuration entry. */
struct rv_display_phy_info_entry {
	uint8_t                   phy_present;
	uint8_t                   active_lane_mapping;
	uint8_t                   display_config_type;
	uint8_t                   active_num_of_lanes;
};

#define RAVEN_MAX_DISPLAYPHY_IDS       10

/* Display PHY table covering all PHY ids on this ASIC. */
struct rv_display_phy_info {
	bool                         display_phy_access_initialized;
	struct rv_display_phy_info_entry  entries[RAVEN_MAX_DISPLAYPHY_IDS];
};
+
#define MAX_DISPLAY_CLOCK_LEVEL 8

/* Thermal controller limits read from the system/BIOS. */
struct rv_system_info{
	uint8_t                      htc_tmp_lmt;	/* HTC temperature limit */
	uint8_t                      htc_hyst_lmt;	/* HTC hysteresis limit */
};

#define MAX_REGULAR_DPM_NUMBER 8

/* One memory-clock/latency pair. */
struct rv_mclk_latency_entries {
	uint32_t  frequency;
	uint32_t  latency;
};

/* Memory-clock latency lookup table (count valid entries). */
struct rv_mclk_latency_table {
	uint32_t  count;
	struct rv_mclk_latency_entries  entries[MAX_REGULAR_DPM_NUMBER];
};
+
/* One clock/voltage pair of a dependency table. */
struct rv_clock_voltage_dependency_record {
	uint32_t clk;
	uint32_t vol;
};


/* Variable-length clock/voltage dependency table.
 * NOTE(review): entries[1] is a trailing variable-length array — the
 * allocator must reserve room for `count` records (a C99 flexible
 * array member would express this more safely). */
struct rv_voltage_dependency_table {
	uint32_t count;
	struct rv_clock_voltage_dependency_record entries[1];
};

/* Per-domain voltage dependency tables parsed from the pptable. */
struct rv_clock_voltage_information {
	struct rv_voltage_dependency_table    *vdd_dep_on_dcefclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_socclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_fclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_mclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_dispclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_dppclk;
	struct rv_voltage_dependency_table    *vdd_dep_on_phyclk;
};
+
/* Raven hwmgr backend private state, stored in hwmgr->backend. */
struct rv_hwmgr {
	uint32_t disable_driver_thermal_policy;
	uint32_t thermal_auto_throttling_treshold;
	struct rv_system_info sys_info;
	struct rv_mclk_latency_table mclk_latency_table;

	uint32_t ddi_power_gating_disabled;

	struct rv_display_phy_info_entry            display_phy_info;
	uint32_t dce_slow_sclk_threshold;

	bool disp_clk_bypass;
	bool disp_clk_bypass_pending;
	uint32_t bapm_enabled;

	bool video_start;
	bool battery_state;

	uint32_t is_nb_dpm_enabled;
	uint32_t is_voltage_island_enabled;
	uint32_t disable_smu_acp_s3_handshake;
	uint32_t disable_notify_smu_vpu_recovery;
	bool                           in_vpu_recovery;
	bool pg_acp_init;
	uint8_t disp_config;

	/* PowerTune */
	uint32_t power_containment_features;
	bool cac_enabled;
	bool disable_uvd_power_tune_feature;
	bool enable_bapm_feature;
	bool enable_tdc_limit_feature;


	/* SMC SRAM Address of firmware header tables */
	uint32_t sram_end;
	uint32_t dpm_table_start;
	uint32_t soft_regs_start;

	/* start of SMU7_Fusion_DpmTable */

	uint8_t uvd_level_count;
	uint8_t vce_level_count;
	uint8_t acp_level_count;
	uint8_t samu_level_count;

	uint32_t fps_high_threshold;
	uint32_t fps_low_threshold;

	uint32_t dpm_flags;		/* DPMFlags_* bits */
	struct rv_dpm_entry sclk_dpm;
	struct rv_dpm_entry uvd_dpm;
	struct rv_dpm_entry vce_dpm;
	struct rv_dpm_entry acp_dpm;
	bool acp_power_up_no_dsp;

	uint32_t max_sclk_level;
	uint32_t num_of_clk_entries;

	/* CPU Power State */
	uint32_t                          separation_time;
	bool                              cc6_disable;
	bool                              pstate_disable;
	bool                              cc6_setting_changed;

	uint32_t                             ulTotalActiveCUs;

	bool                           isp_tileA_power_gated;
	bool                           isp_tileB_power_gated;
	uint32_t                       isp_actual_hard_min_freq;
	uint32_t                       soc_actual_hard_min_freq;
	uint32_t                       dcf_actual_hard_min_freq;

	/* cached hard-min/soft-min frequencies last sent to the SMU */
	uint32_t                        f_actual_hard_min_freq;
	uint32_t                        fabric_actual_soft_min_freq;
	uint32_t                        vclk_soft_min;
	uint32_t                        dclk_soft_min;
	uint32_t                        gfx_actual_soft_min_freq;
	uint32_t                        gfx_min_freq_limit;
	uint32_t                        gfx_max_freq_limit;

	bool                           vcn_power_gated;
	bool                           vcn_dpg_mode;

	bool                           gfx_off_controled_by_driver;
	Watermarks_t                      water_marks_table;
	struct rv_clock_voltage_information   clock_vol_info;
	DpmClocks_t                       clock_table;

	uint32_t active_process_mask;
	bool need_min_deep_sleep_dcefclk;
	uint32_t                             deep_sleep_dcefclk;
	uint32_t                             num_active_display;
};
+
struct pp_hwmgr;

/* Entry point: installs the Raven hwmgr/pptable function tables. */
int rv_init_function_pointers(struct pp_hwmgr *hwmgr);

/* UMD PState Raven Msg Parameters in MHz */
#define RAVEN_UMD_PSTATE_GFXCLK                 700
#define RAVEN_UMD_PSTATE_SOCCLK                 626
#define RAVEN_UMD_PSTATE_FCLK                   933
#define RAVEN_UMD_PSTATE_VCE                    0x03C00320

#define RAVEN_UMD_PSTATE_PEAK_GFXCLK            1100
#define RAVEN_UMD_PSTATE_PEAK_SOCCLK            757
#define RAVEN_UMD_PSTATE_PEAK_FCLK              1200

#define RAVEN_UMD_PSTATE_MIN_GFXCLK             200
#define RAVEN_UMD_PSTATE_MIN_FCLK               400
#define RAVEN_UMD_PSTATE_MIN_SOCCLK             200
/* VCE min packs ecclk/evclk (not MHz despite the header comment above) —
 * NOTE(review): verify against the VCE clock request users */
#define RAVEN_UMD_PSTATE_MIN_VCE                0x0190012C

#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e..f5c4542 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -25,7 +25,7 @@
 
 SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
 	  polaris10_smumgr.o iceland_smumgr.o \
-	  smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
+	  smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
deleted file mode 100644
index cf9ef7a..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "smumgr.h"
-#include "rv_inc.h"
-#include "pp_soc15.h"
-#include "rv_smumgr.h"
-#include "ppatomctrl.h"
-#include "rv_ppsmc.h"
-#include "smu10_driver_if.h"
-#include "smu10.h"
-#include "ppatomctrl.h"
-#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-
-#define BUFFER_SIZE                 80000
-#define MAX_STRING_SIZE             15
-#define BUFFER_SIZETWO              131072
-
-#define MP0_Public                  0x03800000
-#define MP0_SRAM                    0x03900000
-#define MP1_Public                  0x03b00000
-#define MP1_SRAM                    0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS       0x3010028
-
-
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
-	uint32_t mp1_fw_flags, reg;
-
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
-	cgs_write_register(hwmgr->device, reg,
-			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
-	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
-
-	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
-		return true;
-
-	return false;
-}
-
-static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
-{
-	uint32_t reg;
-
-	if (!rv_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-
-	phm_wait_for_register_unequal(hwmgr, reg,
-			0, MP1_C2PMSG_90__CONTENT_MASK);
-
-	return cgs_read_register(hwmgr->device, reg);
-}
-
-int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
-		uint16_t msg)
-{
-	uint32_t reg;
-
-	if (!rv_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
-
-	return 0;
-}
-
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
-	*arg = cgs_read_register(hwmgr->device, reg);
-
-	return 0;
-}
-
-int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-	uint32_t reg;
-
-	rv_wait_for_response(hwmgr);
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
-
-	rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-	if (rv_wait_for_response(hwmgr) == 0)
-		printk("Failed to send Message %x.\n", msg);
-
-	return 0;
-}
-
-
-int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-		uint16_t msg, uint32_t parameter)
-{
-	uint32_t reg;
-
-	rv_wait_for_response(hwmgr);
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
-
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
-
-	rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-
-	if (rv_wait_for_response(hwmgr) == 0)
-		printk("Failed to send Message %x.\n", msg);
-
-	return 0;
-}
-
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
-{
-	struct rv_smumgr *priv =
-			(struct rv_smumgr *)(hwmgr->smu_backend);
-
-	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
-			"Invalid SMU Table ID!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
-			"Invalid SMU Table version!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
-			"Invalid SMU Table Length!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetDriverDramAddrHigh,
-			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
-			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetDriverDramAddrLow,
-			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
-			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
-			return -EINVAL;);
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_TransferTableSmu2Dram,
-			priv->smu_tables.entry[table_id].table_id) == 0,
-			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
-			return -EINVAL;);
-
-	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
-			priv->smu_tables.entry[table_id].size);
-
-	return 0;
-}
-
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
-{
-	struct rv_smumgr *priv =
-			(struct rv_smumgr *)(hwmgr->smu_backend);
-
-	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
-			"Invalid SMU Table ID!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
-			"Invalid SMU Table version!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
-			"Invalid SMU Table Length!", return -EINVAL;);
-
-	memcpy(priv->smu_tables.entry[table_id].table, table,
-			priv->smu_tables.entry[table_id].size);
-
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetDriverDramAddrHigh,
-			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
-			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
-			return -EINVAL;);
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetDriverDramAddrLow,
-			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
-			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
-			return -EINVAL;);
-	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_TransferTableDram2Smu,
-			priv->smu_tables.entry[table_id].table_id) == 0,
-			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
-			return -EINVAL;);
-
-	return 0;
-}
-
-static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
-	uint32_t smc_driver_if_version;
-
-	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_GetDriverIfVersion),
-			"Attempt to get SMC IF Version Number Failed!",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
-			&smc_driver_if_version),
-			"Attempt to read SMC IF Version Number Failed!",
-			return -EINVAL);
-
-	if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
-		return -EINVAL;
-
-	return 0;
-}
-
-/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
-{
-	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PowerUpSdma),
-			"Attempt to power up sdma Failed!",
-			return -EINVAL);
-
-	return 0;
-}
-
-static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
-{
-	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PowerDownSdma),
-			"Attempt to power down sdma Failed!",
-			return -EINVAL);
-
-	return 0;
-}
-
-/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
-{
-	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_PowerUpVcn, 0),
-			"Attempt to power up vcn Failed!",
-			return -EINVAL);
-
-	return 0;
-}
-
-static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
-{
-	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_PowerDownVcn, 0),
-			"Attempt to power down vcn Failed!",
-			return -EINVAL);
-
-	return 0;
-}
-
-static int rv_smu_fini(struct pp_hwmgr *hwmgr)
-{
-	struct rv_smumgr *priv =
-			(struct rv_smumgr *)(hwmgr->smu_backend);
-
-	if (priv) {
-		rv_smc_disable_sdma(hwmgr);
-		rv_smc_disable_vcn(hwmgr);
-		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
-					&priv->smu_tables.entry[WMTABLE].mc_addr,
-					priv->smu_tables.entry[WMTABLE].table);
-		amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
-					&priv->smu_tables.entry[CLOCKTABLE].mc_addr,
-					priv->smu_tables.entry[CLOCKTABLE].table);
-		kfree(hwmgr->smu_backend);
-		hwmgr->smu_backend = NULL;
-	}
-
-	return 0;
-}
-
-static int rv_start_smu(struct pp_hwmgr *hwmgr)
-{
-	struct cgs_firmware_info info = {0};
-
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-	rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
-	info.version = hwmgr->smu_version >> 8;
-
-	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
-
-	if (rv_verify_smc_interface(hwmgr))
-		return -EINVAL;
-	if (rv_smc_enable_sdma(hwmgr))
-		return -EINVAL;
-	if (rv_smc_enable_vcn(hwmgr))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int rv_smu_init(struct pp_hwmgr *hwmgr)
-{
-	struct rv_smumgr *priv;
-	uint64_t mc_addr;
-	void *kaddr = NULL;
-	struct amdgpu_bo *handle;
-	int r;
-
-	priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
-
-	if (!priv)
-		return -ENOMEM;
-
-	hwmgr->smu_backend = priv;
-
-	/* allocate space for watermarks table */
-	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-			sizeof(Watermarks_t),
-			PAGE_SIZE,
-			AMDGPU_GEM_DOMAIN_VRAM,
-			&handle,
-			&mc_addr,
-			&kaddr);
-
-	if (r)
-		return -EINVAL;
-
-	priv->smu_tables.entry[WMTABLE].version = 0x01;
-	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
-	priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
-	priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
-	priv->smu_tables.entry[WMTABLE].table = kaddr;
-	priv->smu_tables.entry[WMTABLE].handle = handle;
-
-	/* allocate space for watermarks table */
-	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-			sizeof(DpmClocks_t),
-			PAGE_SIZE,
-			AMDGPU_GEM_DOMAIN_VRAM,
-			&handle,
-			&mc_addr,
-			&kaddr);
-
-	if (r) {
-		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
-					&priv->smu_tables.entry[WMTABLE].mc_addr,
-					&priv->smu_tables.entry[WMTABLE].table);
-		return -EINVAL;
-	}
-
-	priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
-	priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
-	priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
-	priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr;
-	priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
-	priv->smu_tables.entry[CLOCKTABLE].handle = handle;
-
-	return 0;
-}
-
-const struct pp_smumgr_func rv_smu_funcs = {
-	.smu_init = &rv_smu_init,
-	.smu_fini = &rv_smu_fini,
-	.start_smu = &rv_start_smu,
-	.request_smu_load_specific_fw = NULL,
-	.send_msg_to_smc = &rv_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
-	.download_pptable_settings = NULL,
-	.upload_pptable_settings = NULL,
-};
-
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
deleted file mode 100644
index 0ff4ac5..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef PP_RAVEN_SMUMANAGER_H
-#define PP_RAVEN_SMUMANAGER_H
-
-#include "rv_ppsmc.h"
-#include "smu10_driver_if.h"
-
-enum SMU_TABLE_ID {
-	WMTABLE = 0,
-	CLOCKTABLE,
-	MAX_SMU_TABLE,
-};
-
-struct smu_table_entry {
-	uint32_t version;
-	uint32_t size;
-	uint32_t table_id;
-	uint64_t mc_addr;
-	void *table;
-	struct amdgpu_bo *handle;
-};
-
-struct smu_table_array {
-	struct smu_table_entry entry[MAX_SMU_TABLE];
-};
-
-struct rv_smumgr {
-	struct smu_table_array            smu_tables;
-};
-
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id);
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
new file mode 100644
index 0000000..3a349f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "rv_inc.h"
+#include "pp_soc15.h"
+#include "smu10_smumgr.h"
+#include "ppatomctrl.h"
+#include "rv_ppsmc.h"
+#include "smu10_driver_if.h"
+#include "smu10.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu7_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+
+#define BUFFER_SIZE                 80000
+#define MAX_STRING_SIZE             15
+#define BUFFER_SIZETWO              131072
+
+#define MP0_Public                  0x03800000
+#define MP0_SRAM                    0x03900000
+#define MP1_Public                  0x03b00000
+#define MP1_SRAM                    0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS       0x3010028
+
+
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+	uint32_t mp1_fw_flags, reg;
+
+	reg = soc15_get_register_offset(NBIF_HWID, 0,
+			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
+
+	cgs_write_register(hwmgr->device, reg,
+			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+	reg = soc15_get_register_offset(NBIF_HWID, 0,
+			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
+
+	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+
+	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+		return true;
+
+	return false;
+}
+
+static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg;
+
+	if (!rv_is_smc_ram_running(hwmgr))
+		return -EINVAL;
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+
+	phm_wait_for_register_unequal(hwmgr, reg,
+			0, MP1_C2PMSG_90__CONTENT_MASK);
+
+	return cgs_read_register(hwmgr->device, reg);
+}
+
+int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+		uint16_t msg)
+{
+	uint32_t reg;
+
+	if (!rv_is_smc_ram_running(hwmgr))
+		return -EINVAL;
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
+	cgs_write_register(hwmgr->device, reg, msg);
+
+	return 0;
+}
+
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
+{
+	uint32_t reg;
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+
+	*arg = cgs_read_register(hwmgr->device, reg);
+
+	return 0;
+}
+
+int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+	uint32_t reg;
+
+	rv_wait_for_response(hwmgr);
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	cgs_write_register(hwmgr->device, reg, 0);
+
+	rv_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+	if (rv_wait_for_response(hwmgr) == 0)
+		printk("Failed to send Message %x.\n", msg);
+
+	return 0;
+}
+
+
+int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+		uint16_t msg, uint32_t parameter)
+{
+	uint32_t reg;
+
+	rv_wait_for_response(hwmgr);
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	cgs_write_register(hwmgr->device, reg, 0);
+
+	reg = soc15_get_register_offset(MP1_HWID, 0,
+			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+	cgs_write_register(hwmgr->device, reg, parameter);
+
+	rv_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+
+	if (rv_wait_for_response(hwmgr) == 0)
+		printk("Failed to send Message %x.\n", msg);
+
+	return 0;
+}
+
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
+{
+	struct rv_smumgr *priv =
+			(struct rv_smumgr *)(hwmgr->smu_backend);
+
+	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+			"Invalid SMU Table ID!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+			"Invalid SMU Table version!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+			"Invalid SMU Table Length!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetDriverDramAddrHigh,
+			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetDriverDramAddrLow,
+			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
+			return -EINVAL;);
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_TransferTableSmu2Dram,
+			priv->smu_tables.entry[table_id].table_id) == 0,
+			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
+			return -EINVAL;);
+
+	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
+			priv->smu_tables.entry[table_id].size);
+
+	return 0;
+}
+
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
+{
+	struct rv_smumgr *priv =
+			(struct rv_smumgr *)(hwmgr->smu_backend);
+
+	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+			"Invalid SMU Table ID!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+			"Invalid SMU Table version!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+			"Invalid SMU Table Length!", return -EINVAL;);
+
+	memcpy(priv->smu_tables.entry[table_id].table, table,
+			priv->smu_tables.entry[table_id].size);
+
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetDriverDramAddrHigh,
+			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
+			return -EINVAL;);
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetDriverDramAddrLow,
+			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
+			return -EINVAL;);
+	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_TransferTableDram2Smu,
+			priv->smu_tables.entry[table_id].table_id) == 0,
+			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
+			return -EINVAL;);
+
+	return 0;
+}
+
+static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
+{
+	uint32_t smc_driver_if_version;
+
+	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
+			PPSMC_MSG_GetDriverIfVersion),
+			"Attempt to get SMC IF Version Number Failed!",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+			&smc_driver_if_version),
+			"Attempt to read SMC IF Version Number Failed!",
+			return -EINVAL);
+
+	if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* sdma is disabled by default in vbios, need to re-enable in driver */
+static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
+			PPSMC_MSG_PowerUpSdma),
+			"Attempt to power up sdma Failed!",
+			return -EINVAL);
+
+	return 0;
+}
+
+static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
+			PPSMC_MSG_PowerDownSdma),
+			"Attempt to power down sdma Failed!",
+			return -EINVAL);
+
+	return 0;
+}
+
+/* vcn is disabled by default in vbios, need to re-enable in driver */
+static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_PowerUpVcn, 0),
+			"Attempt to power up vcn Failed!",
+			return -EINVAL);
+
+	return 0;
+}
+
+static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_PowerDownVcn, 0),
+			"Attempt to power down vcn Failed!",
+			return -EINVAL);
+
+	return 0;
+}
+
+static int rv_smu_fini(struct pp_hwmgr *hwmgr)
+{
+	struct rv_smumgr *priv =
+			(struct rv_smumgr *)(hwmgr->smu_backend);
+
+	if (priv) {
+		rv_smc_disable_sdma(hwmgr);
+		rv_smc_disable_vcn(hwmgr);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					priv->smu_tables.entry[WMTABLE].table);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
+					&priv->smu_tables.entry[CLOCKTABLE].mc_addr,
+					priv->smu_tables.entry[CLOCKTABLE].table);
+		kfree(hwmgr->smu_backend);
+		hwmgr->smu_backend = NULL;
+	}
+
+	return 0;
+}
+
+static int rv_start_smu(struct pp_hwmgr *hwmgr)
+{
+	struct cgs_firmware_info info = {0};
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+	rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
+	info.version = hwmgr->smu_version >> 8;
+
+	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+	if (rv_verify_smc_interface(hwmgr))
+		return -EINVAL;
+	if (rv_smc_enable_sdma(hwmgr))
+		return -EINVAL;
+	if (rv_smc_enable_vcn(hwmgr))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int rv_smu_init(struct pp_hwmgr *hwmgr)
+{
+	struct rv_smumgr *priv;
+	uint64_t mc_addr;
+	void *kaddr = NULL;
+	struct amdgpu_bo *handle;
+	int r;
+
+	priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
+
+	if (!priv)
+		return -ENOMEM;
+
+	hwmgr->smu_backend = priv;
+
+	/* allocate space for watermarks table */
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+			sizeof(Watermarks_t),
+			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
+			&mc_addr,
+			&kaddr);
+
+	if (r)
+		return -EINVAL;
+
+	priv->smu_tables.entry[WMTABLE].version = 0x01;
+	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
+	priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
+	priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
+	priv->smu_tables.entry[WMTABLE].table = kaddr;
+	priv->smu_tables.entry[WMTABLE].handle = handle;
+
+	/* allocate space for DPM clocks table */
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+			sizeof(DpmClocks_t),
+			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
+			&mc_addr,
+			&kaddr);
+
+	if (r) {
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					&priv->smu_tables.entry[WMTABLE].table);
+		return -EINVAL;
+	}
+
+	priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
+	priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
+	priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
+	priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr;
+	priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
+	priv->smu_tables.entry[CLOCKTABLE].handle = handle;
+
+	return 0;
+}
+
+const struct pp_smumgr_func rv_smu_funcs = {
+	.smu_init = &rv_smu_init,
+	.smu_fini = &rv_smu_fini,
+	.start_smu = &rv_start_smu,
+	.request_smu_load_specific_fw = NULL,
+	.send_msg_to_smc = &rv_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
+	.download_pptable_settings = NULL,
+	.upload_pptable_settings = NULL,
+};
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
new file mode 100644
index 0000000..0ff4ac5
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef PP_RAVEN_SMUMANAGER_H
+#define PP_RAVEN_SMUMANAGER_H
+
+#include "rv_ppsmc.h"
+#include "smu10_driver_if.h"
+
+enum SMU_TABLE_ID {
+	WMTABLE = 0,
+	CLOCKTABLE,
+	MAX_SMU_TABLE,
+};
+
+struct smu_table_entry {
+	uint32_t version;
+	uint32_t size;
+	uint32_t table_id;
+	uint64_t mc_addr;
+	void *table;
+	struct amdgpu_bo *handle;
+};
+
+struct smu_table_array {
+	struct smu_table_entry entry[MAX_SMU_TABLE];
+};
+
+struct rv_smumgr {
+	struct smu_table_array            smu_tables;
+};
+
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id);
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id);
+
+
+#endif
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 3/4] drm/amd/pp: Replace files name cz_* with smu8_*
       [not found] ` <1520331047-10759-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
@ 2018-03-06 10:10   ` Rex Zhu
  2018-03-06 16:41   ` [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_* Alex Deucher
  1 sibling, 0 replies; 4+ messages in thread
From: Rex Zhu @ 2018-03-06 10:10 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Rex Zhu

Powerplay is for the hw IP SMU; smu8 was used on CZ/ST,
so use smu8 as the prefix of the file names.

Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>

Change-Id: I758b6d32aef5b6d2c72c9f3de5811c70945f073b
---
 drivers/gpu/drm/amd/powerplay/hwmgr/Makefile       |    4 +-
 .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  |  209 ---
 .../drm/amd/powerplay/hwmgr/cz_clockpowergating.h  |   36 -
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     | 1886 --------------------
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h     |  322 ----
 .../amd/powerplay/hwmgr/smu8_clockpowergating.c    |  209 +++
 .../amd/powerplay/hwmgr/smu8_clockpowergating.h    |   36 +
 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c   | 1886 ++++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h   |  322 ++++
 drivers/gpu/drm/amd/powerplay/smumgr/Makefile      |    2 +-
 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c   |  883 ---------
 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h   |   99 -
 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c |  883 +++++++++
 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h |   99 +
 14 files changed, 3438 insertions(+), 3438 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
 delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
 delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
 delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 529f062..5813de9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -24,8 +24,8 @@
 # It provides the hardware management services for the driver.
 
 HARDWARE_MGR = hwmgr.o processpptables.o \
-		hardwaremanager.o cz_hwmgr.o \
-		cz_clockpowergating.o pppcielanes.o\
+		hardwaremanager.o smu8_hwmgr.o \
+		smu8_clockpowergating.o pppcielanes.o\
 		process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
 		smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
 		smu7_clockpowergating.o \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
deleted file mode 100644
index 416abeb..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "cz_clockpowergating.h"
-#include "cz_ppsmc.h"
-
-/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
-    0    GFX0L (3:0),                  (27:24),
-    1    GFX0H (7:4),                  (31:28),
-    2    GFX1L (3:0),                  (19:16),
-    3    GFX1H (7:4),                  (23:20),
-    4    DDIL   (3:0),                   (11: 8),
-    5    DDIH  (7:4),                   (15:12),
-    6    DDI2L (3:0),                   ( 3: 0),
-    7    DDI2H (7:4),                   ( 7: 4),
-*/
-#define DDI_PHY_GEN_STATUS_VAL(phyID)   (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
-#define IS_PHY_ID_USED_BY_PLL(PhyID)    (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
-
-
-int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
-	int ret = 0;
-
-	switch (block) {
-	case PHM_AsicBlock_UVD_MVC:
-	case PHM_AsicBlock_UVD:
-	case PHM_AsicBlock_UVD_HD:
-	case PHM_AsicBlock_UVD_SD:
-		if (gating == PHM_ClockGateSetting_StaticOff)
-			ret = cz_dpm_powerdown_uvd(hwmgr);
-		else
-			ret = cz_dpm_powerup_uvd(hwmgr);
-		break;
-	case PHM_AsicBlock_GFX:
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-
-bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
-{
-	return true;
-}
-
-
-int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return 0;
-}
-
-int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	uint32_t dpm_features = 0;
-
-	if (enable &&
-		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				  PHM_PlatformCaps_UVDDPM)) {
-		cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
-		dpm_features |= UVD_DPM_MASK;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
-	} else {
-		dpm_features |= UVD_DPM_MASK;
-		cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
-	}
-	return 0;
-}
-
-int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	uint32_t dpm_features = 0;
-
-	if (enable && phm_cap_enabled(
-				hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_VCEDPM)) {
-		cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
-		dpm_features |= VCE_DPM_MASK;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
-	} else {
-		dpm_features |= VCE_DPM_MASK;
-		cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
-	}
-
-	return 0;
-}
-
-
-void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->uvd_power_gated = bgate;
-
-	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_GATE);
-		cz_dpm_update_uvd_dpm(hwmgr, true);
-		cz_dpm_powerdown_uvd(hwmgr);
-	} else {
-		cz_dpm_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_UNGATE);
-		cz_dpm_update_uvd_dpm(hwmgr, false);
-	}
-
-}
-
-void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	if (bgate) {
-		cgs_set_powergating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_CG_STATE_GATE);
-		cz_enable_disable_vce_dpm(hwmgr, false);
-		cz_dpm_powerdown_vce(hwmgr);
-		cz_hwmgr->vce_power_gated = true;
-	} else {
-		cz_dpm_powerup_vce(hwmgr);
-		cz_hwmgr->vce_power_gated = false;
-		cgs_set_clockgating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_PG_STATE_UNGATE);
-		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, true);
-	}
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
deleted file mode 100644
index 92f707b..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _CZ_CLOCK_POWER_GATING_H_
-#define _CZ_CLOCK_POWER_GATING_H_
-
-#include "cz_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
deleted file mode 100644
index 8c1f884..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ /dev/null
@@ -1,1886 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "pp_debug.h"
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "atom-types.h"
-#include "atombios.h"
-#include "processpptables.h"
-#include "cgs_common.h"
-#include "smu/smu_8_0_d.h"
-#include "smu8_fusion.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "smumgr.h"
-#include "hwmgr.h"
-#include "hardwaremanager.h"
-#include "cz_ppsmc.h"
-#include "cz_hwmgr.h"
-#include "power_state.h"
-#include "cz_clockpowergating.h"
-#include "pp_thermal.h"
-
-#define ixSMUSVI_NB_CURRENTVID 0xD8230044
-#define CURRENT_NB_VID_MASK 0xff000000
-#define CURRENT_NB_VID__SHIFT 24
-#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
-#define CURRENT_GFX_VID_MASK 0xff000000
-#define CURRENT_GFX_VID__SHIFT 24
-
-static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
-
-static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
-{
-	if (PhwCz_Magic != hw_ps->magic)
-		return NULL;
-
-	return (struct cz_power_state *)hw_ps;
-}
-
-static const struct cz_power_state *cast_const_PhwCzPowerState(
-				const struct pp_hw_power_state *hw_ps)
-{
-	if (PhwCz_Magic != hw_ps->magic)
-		return NULL;
-
-	return (struct cz_power_state *)hw_ps;
-}
-
-static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
-					uint32_t clock, uint32_t msg)
-{
-	int i = 0;
-	struct phm_vce_clock_voltage_dependency_table *ptable =
-		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-	switch (msg) {
-	case PPSMC_MSG_SetEclkSoftMin:
-	case PPSMC_MSG_SetEclkHardMin:
-		for (i = 0; i < (int)ptable->count; i++) {
-			if (clock <= ptable->entries[i].ecclk)
-				break;
-		}
-		break;
-
-	case PPSMC_MSG_SetEclkSoftMax:
-	case PPSMC_MSG_SetEclkHardMax:
-		for (i = ptable->count - 1; i >= 0; i--) {
-			if (clock >= ptable->entries[i].ecclk)
-				break;
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return i;
-}
-
-static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
-				uint32_t clock, uint32_t msg)
-{
-	int i = 0;
-	struct phm_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-	switch (msg) {
-	case PPSMC_MSG_SetSclkSoftMin:
-	case PPSMC_MSG_SetSclkHardMin:
-		for (i = 0; i < (int)table->count; i++) {
-			if (clock <= table->entries[i].clk)
-				break;
-		}
-		break;
-
-	case PPSMC_MSG_SetSclkSoftMax:
-	case PPSMC_MSG_SetSclkHardMax:
-		for (i = table->count - 1; i >= 0; i--) {
-			if (clock >= table->entries[i].clk)
-				break;
-		}
-		break;
-
-	default:
-		break;
-	}
-	return i;
-}
-
-static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
-					uint32_t clock, uint32_t msg)
-{
-	int i = 0;
-	struct phm_uvd_clock_voltage_dependency_table *ptable =
-		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
-	switch (msg) {
-	case PPSMC_MSG_SetUvdSoftMin:
-	case PPSMC_MSG_SetUvdHardMin:
-		for (i = 0; i < (int)ptable->count; i++) {
-			if (clock <= ptable->entries[i].vclk)
-				break;
-		}
-		break;
-
-	case PPSMC_MSG_SetUvdSoftMax:
-	case PPSMC_MSG_SetUvdHardMax:
-		for (i = ptable->count - 1; i >= 0; i--) {
-			if (clock >= ptable->entries[i].vclk)
-				break;
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return i;
-}
-
-static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	if (cz_hwmgr->max_sclk_level == 0) {
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
-		cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
-	}
-
-	return cz_hwmgr->max_sclk_level;
-}
-
-static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct amdgpu_device *adev = hwmgr->adev;
-
-	cz_hwmgr->gfx_ramp_step = 256*25/100;
-	cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
-
-	cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
-	cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-	cz_hwmgr->clock_slow_down_freq = 25000;
-	cz_hwmgr->skip_clock_slow_down = 1;
-	cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-	cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-	cz_hwmgr->voting_rights_clients = 0x00C00033;
-	cz_hwmgr->static_screen_threshold = 8;
-	cz_hwmgr->ddi_power_gating_disabled = 0;
-	cz_hwmgr->bapm_enabled = 1;
-	cz_hwmgr->voltage_drop_threshold = 0;
-	cz_hwmgr->gfx_power_gating_threshold = 500;
-	cz_hwmgr->vce_slow_sclk_threshold = 20000;
-	cz_hwmgr->dce_slow_sclk_threshold = 30000;
-	cz_hwmgr->disable_driver_thermal_policy = 1;
-	cz_hwmgr->disable_nb_ps3_in_battery = 0;
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-							PHM_PlatformCaps_ABM);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_NonABMSupportInPPLib);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_DynamicM3Arbiter);
-
-	cz_hwmgr->override_dynamic_mgpg = 1;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				  PHM_PlatformCaps_DynamicPatchPowerState);
-
-	cz_hwmgr->thermal_auto_throttling_treshold = 0;
-	cz_hwmgr->tdr_clock = 0;
-	cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_DynamicUVDState);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_UVDDPM);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_VCEDPM);
-
-	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
-	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
-	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
-	cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				   PHM_PlatformCaps_DisableVoltageIsland);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		      PHM_PlatformCaps_UVDPowerGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		      PHM_PlatformCaps_VCEPowerGating);
-
-	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			      PHM_PlatformCaps_UVDPowerGating);
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			      PHM_PlatformCaps_VCEPowerGating);
-
-
-	return 0;
-}
-
-static uint32_t cz_convert_8Bit_index_to_voltage(
-			struct pp_hwmgr *hwmgr, uint16_t voltage)
-{
-	return 6200 - (voltage * 25);
-}
-
-static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
-			struct phm_clock_and_voltage_limits *table)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
-	struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
-	struct phm_clock_voltage_dependency_table *dep_table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-	if (dep_table->count > 0) {
-		table->sclk = dep_table->entries[dep_table->count-1].clk;
-		table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
-		   (uint16_t)dep_table->entries[dep_table->count-1].v);
-	}
-	table->mclk = sys_info->nbp_memory_clock[0];
-	return 0;
-}
-
-static int cz_init_dynamic_state_adjustment_rule_settings(
-			struct pp_hwmgr *hwmgr,
-			ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
-{
-	uint32_t table_size =
-		sizeof(struct phm_clock_voltage_dependency_table) +
-		(7 * sizeof(struct phm_clock_voltage_dependency_record));
-
-	struct phm_clock_voltage_dependency_table *table_clk_vlt =
-					kzalloc(table_size, GFP_KERNEL);
-
-	if (NULL == table_clk_vlt) {
-		pr_err("Can not allocate memory!\n");
-		return -ENOMEM;
-	}
-
-	table_clk_vlt->count = 8;
-	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
-	table_clk_vlt->entries[0].v = 0;
-	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
-	table_clk_vlt->entries[1].v = 1;
-	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
-	table_clk_vlt->entries[2].v = 2;
-	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
-	table_clk_vlt->entries[3].v = 3;
-	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
-	table_clk_vlt->entries[4].v = 4;
-	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
-	table_clk_vlt->entries[5].v = 5;
-	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
-	table_clk_vlt->entries[6].v = 6;
-	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
-	table_clk_vlt->entries[7].v = 7;
-	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
-
-	return 0;
-}
-
-static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
-	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
-	uint32_t i;
-	int result = 0;
-	uint8_t frev, crev;
-	uint16_t size;
-
-	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
-			hwmgr->device,
-			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
-			&size, &frev, &crev);
-
-	if (crev != 9) {
-		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
-		return -EINVAL;
-	}
-
-	if (info == NULL) {
-		pr_err("Could not retrieve the Integrated System Info Table!\n");
-		return -EINVAL;
-	}
-
-	cz_hwmgr->sys_info.bootup_uma_clock =
-				   le32_to_cpu(info->ulBootUpUMAClock);
-
-	cz_hwmgr->sys_info.bootup_engine_clock =
-				le32_to_cpu(info->ulBootUpEngineClock);
-
-	cz_hwmgr->sys_info.dentist_vco_freq =
-				   le32_to_cpu(info->ulDentistVCOFreq);
-
-	cz_hwmgr->sys_info.system_config =
-				     le32_to_cpu(info->ulSystemConfig);
-
-	cz_hwmgr->sys_info.bootup_nb_voltage_index =
-				  le16_to_cpu(info->usBootUpNBVoltage);
-
-	cz_hwmgr->sys_info.htc_hyst_lmt =
-			(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
-
-	cz_hwmgr->sys_info.htc_tmp_lmt =
-			(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
-
-	if (cz_hwmgr->sys_info.htc_tmp_lmt <=
-			cz_hwmgr->sys_info.htc_hyst_lmt) {
-		pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
-		return -EINVAL;
-	}
-
-	cz_hwmgr->sys_info.nb_dpm_enable =
-				cz_hwmgr->enable_nb_ps_policy &&
-				(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
-
-	for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
-		if (i < CZ_NUM_NBPMEMORYCLOCK) {
-			cz_hwmgr->sys_info.nbp_memory_clock[i] =
-			  le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
-		}
-		cz_hwmgr->sys_info.nbp_n_clock[i] =
-			    le32_to_cpu(info->ulNbpStateNClkFreq[i]);
-	}
-
-	for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
-		cz_hwmgr->sys_info.display_clock[i] =
-					le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
-	}
-
-	/* Here use 4 levels, make sure not exceed */
-	for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
-		cz_hwmgr->sys_info.nbp_voltage_index[i] =
-			     le16_to_cpu(info->usNBPStateVoltage[i]);
-	}
-
-	if (!cz_hwmgr->sys_info.nb_dpm_enable) {
-		for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
-			if (i < CZ_NUM_NBPMEMORYCLOCK) {
-				cz_hwmgr->sys_info.nbp_memory_clock[i] =
-				    cz_hwmgr->sys_info.nbp_memory_clock[0];
-			}
-			cz_hwmgr->sys_info.nbp_n_clock[i] =
-				    cz_hwmgr->sys_info.nbp_n_clock[0];
-			cz_hwmgr->sys_info.nbp_voltage_index[i] =
-				    cz_hwmgr->sys_info.nbp_voltage_index[0];
-		}
-	}
-
-	if (le32_to_cpu(info->ulGPUCapInfo) &
-		SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_EnableDFSBypass);
-	}
-
-	cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
-
-	cz_construct_max_power_limits_table (hwmgr,
-				    &hwmgr->dyn_state.max_clock_voltage_on_ac);
-
-	cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
-				    &info->sDISPCLK_Voltage[0]);
-
-	return result;
-}
-
-static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->boot_power_level.engineClock =
-				cz_hwmgr->sys_info.bootup_engine_clock;
-
-	cz_hwmgr->boot_power_level.vddcIndex =
-			(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
-
-	cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-	cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-	cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-	cz_hwmgr->boot_power_level.forceNBPstate = 0;
-	cz_hwmgr->boot_power_level.hysteresis_up = 0;
-	cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-	cz_hwmgr->boot_power_level.display_wm = 0;
-	cz_hwmgr->boot_power_level.vce_wm = 0;
-
-	return 0;
-}
-
-static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
-{
-	struct SMU8_Fusion_ClkTable *clock_table;
-	int ret;
-	uint32_t i;
-	void *table = NULL;
-	pp_atomctrl_clock_dividers_kong dividers;
-
-	struct phm_clock_voltage_dependency_table *vddc_table =
-		hwmgr->dyn_state.vddc_dependency_on_sclk;
-	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
-		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
-	struct phm_acp_clock_voltage_dependency_table *acp_table =
-		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
-	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
-		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-	struct phm_vce_clock_voltage_dependency_table *vce_table =
-		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-	if (!hwmgr->need_pp_table_upload)
-		return 0;
-
-	ret = smum_download_powerplay_table(hwmgr, &table);
-
-	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
-			    "Fail to get clock table from SMU!", return -EINVAL;);
-
-	clock_table = (struct SMU8_Fusion_ClkTable *)table;
-
-	/* patch clock table */
-	PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
-			    "Dependency table entry exceeds max limit!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
-			    "Dependency table entry exceeds max limit!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
-			    "Dependency table entry exceeds max limit!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
-			    "Dependency table entry exceeds max limit!", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
-			    "Dependency table entry exceeds max limit!", return -EINVAL;);
-
-	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
-
-		/* vddc_sclk */
-		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
-			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
-		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
-			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
-
-		atomctrl_get_engine_pll_dividers_kong(hwmgr,
-						      clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
-						      &dividers);
-
-		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
-			(uint8_t)dividers.pll_post_divider;
-
-		/* vddgfx_sclk */
-		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
-			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
-
-		/* acp breakdown */
-		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
-			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
-		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
-			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
-
-		atomctrl_get_engine_pll_dividers_kong(hwmgr,
-						      clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
-						      &dividers);
-
-		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
-			(uint8_t)dividers.pll_post_divider;
-
-
-		/* uvd breakdown */
-		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
-			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
-		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
-			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
-
-		atomctrl_get_engine_pll_dividers_kong(hwmgr,
-						      clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
-						      &dividers);
-
-		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
-			(uint8_t)dividers.pll_post_divider;
-
-		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
-			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
-		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
-			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
-
-		atomctrl_get_engine_pll_dividers_kong(hwmgr,
-						      clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
-						      &dividers);
-
-		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
-			(uint8_t)dividers.pll_post_divider;
-
-		/* vce breakdown */
-		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
-			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
-		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
-			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
-
-
-		atomctrl_get_engine_pll_dividers_kong(hwmgr,
-						      clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
-						      &dividers);
-
-		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
-			(uint8_t)dividers.pll_post_divider;
-
-	}
-	ret = smum_upload_powerplay_table(hwmgr);
-
-	return ret;
-}
-
-static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_clock_voltage_dependency_table *table =
-					hwmgr->dyn_state.vddc_dependency_on_sclk;
-	unsigned long clock = 0, level;
-
-	if (NULL == table || table->count <= 0)
-		return -EINVAL;
-
-	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
-	cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
-
-	level = cz_get_max_sclk_level(hwmgr) - 1;
-
-	if (level < table->count)
-		clock = table->entries[level].clk;
-	else
-		clock = table->entries[table->count - 1].clk;
-
-	cz_hwmgr->sclk_dpm.soft_max_clk = clock;
-	cz_hwmgr->sclk_dpm.hard_max_clk = clock;
-
-	return 0;
-}
-
-static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_uvd_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-	unsigned long clock = 0, level;
-
-	if (NULL == table || table->count <= 0)
-		return -EINVAL;
-
-	cz_hwmgr->uvd_dpm.soft_min_clk = 0;
-	cz_hwmgr->uvd_dpm.hard_min_clk = 0;
-
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
-	level = smum_get_argument(hwmgr);
-
-	if (level < table->count)
-		clock = table->entries[level].vclk;
-	else
-		clock = table->entries[table->count - 1].vclk;
-
-	cz_hwmgr->uvd_dpm.soft_max_clk = clock;
-	cz_hwmgr->uvd_dpm.hard_max_clk = clock;
-
-	return 0;
-}
-
-static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_vce_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-	unsigned long clock = 0, level;
-
-	if (NULL == table || table->count <= 0)
-		return -EINVAL;
-
-	cz_hwmgr->vce_dpm.soft_min_clk = 0;
-	cz_hwmgr->vce_dpm.hard_min_clk = 0;
-
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
-	level = smum_get_argument(hwmgr);
-
-	if (level < table->count)
-		clock = table->entries[level].ecclk;
-	else
-		clock = table->entries[table->count - 1].ecclk;
-
-	cz_hwmgr->vce_dpm.soft_max_clk = clock;
-	cz_hwmgr->vce_dpm.hard_max_clk = clock;
-
-	return 0;
-}
-
-static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_acp_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
-	unsigned long clock = 0, level;
-
-	if (NULL == table || table->count <= 0)
-		return -EINVAL;
-
-	cz_hwmgr->acp_dpm.soft_min_clk = 0;
-	cz_hwmgr->acp_dpm.hard_min_clk = 0;
-
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
-	level = smum_get_argument(hwmgr);
-
-	if (level < table->count)
-		clock = table->entries[level].acpclk;
-	else
-		clock = table->entries[table->count - 1].acpclk;
-
-	cz_hwmgr->acp_dpm.soft_max_clk = clock;
-	cz_hwmgr->acp_dpm.hard_max_clk = clock;
-	return 0;
-}
-
-static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->uvd_power_gated = false;
-	cz_hwmgr->vce_power_gated = false;
-	cz_hwmgr->samu_power_gated = false;
-	cz_hwmgr->acp_power_gated = false;
-	cz_hwmgr->pgacpinit = true;
-}
-
-static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->low_sclk_interrupt_threshold = 0;
-}
-
-static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_clock_voltage_dependency_table *table =
-					hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-	unsigned long clock = 0;
-	unsigned long level;
-	unsigned long stable_pstate_sclk;
-	unsigned long percentage;
-
-	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
-	level = cz_get_max_sclk_level(hwmgr) - 1;
-
-	if (level < table->count)
-		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[level].clk;
-	else
-		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
-
-	clock = hwmgr->display_config.min_core_set_clock;
-	if (clock == 0)
-		pr_debug("min_core_set_clock not set\n");
-
-	if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
-		cz_hwmgr->sclk_dpm.hard_min_clk = clock;
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSclkHardMin,
-						 cz_get_sclk_level(hwmgr,
-					cz_hwmgr->sclk_dpm.hard_min_clk,
-					     PPSMC_MSG_SetSclkHardMin));
-	}
-
-	clock = cz_hwmgr->sclk_dpm.soft_min_clk;
-
-	/* update minimum clocks for Stable P-State feature */
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				     PHM_PlatformCaps_StablePState)) {
-		percentage = 75;
-		/*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table  */
-		stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
-					percentage) / 100;
-
-		if (clock < stable_pstate_sclk)
-			clock = stable_pstate_sclk;
-	}
-
-	if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
-		cz_hwmgr->sclk_dpm.soft_min_clk = clock;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSclkSoftMin,
-						cz_get_sclk_level(hwmgr,
-					cz_hwmgr->sclk_dpm.soft_min_clk,
-					     PPSMC_MSG_SetSclkSoftMin));
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_StablePState) &&
-			 cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
-		cz_hwmgr->sclk_dpm.soft_max_clk = clock;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetSclkSoftMax,
-						cz_get_sclk_level(hwmgr,
-					cz_hwmgr->sclk_dpm.soft_max_clk,
-					PPSMC_MSG_SetSclkSoftMax));
-	}
-
-	return 0;
-}
-
-static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_SclkDeepSleep)) {
-		uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
-		if (clks == 0)
-			clks = CZ_MIN_DEEP_SLEEP_SCLK;
-
-		PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetMinDeepSleepSclk,
-				clks);
-	}
-
-	return 0;
-}
-
-static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr =
-				  (struct cz_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetWatermarkFrequency,
-					cz_hwmgr->sclk_dpm.soft_max_clk);
-
-	return 0;
-}
-
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
-	if (hw_data->is_nb_dpm_enabled) {
-		if (enable) {
-			PP_DBG_LOG("enable Low Memory PState.\n");
-
-			return smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_EnableLowMemoryPstate,
-						(lock ? 1 : 0));
-		} else {
-			PP_DBG_LOG("disable Low Memory PState.\n");
-
-			return smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_DisableLowMemoryPstate,
-						(lock ? 1 : 0));
-		}
-	}
-
-	return 0;
-}
-
-static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
-{
-	int ret = 0;
-
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	unsigned long dpm_features = 0;
-
-	if (cz_hwmgr->is_nb_dpm_enabled) {
-		cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
-		dpm_features |= NB_DPM_MASK;
-		ret = smum_send_msg_to_smc_with_parameter(
-							  hwmgr,
-							  PPSMC_MSG_DisableAllSmuFeatures,
-							  dpm_features);
-		if (ret == 0)
-			cz_hwmgr->is_nb_dpm_enabled = false;
-	}
-
-	return ret;
-}
-
-static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
-{
-	int ret = 0;
-
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	unsigned long dpm_features = 0;
-
-	if (!cz_hwmgr->is_nb_dpm_enabled) {
-		PP_DBG_LOG("enabling ALL SMU features.\n");
-		dpm_features |= NB_DPM_MASK;
-		ret = smum_send_msg_to_smc_with_parameter(
-							  hwmgr,
-							  PPSMC_MSG_EnableAllSmuFeatures,
-							  dpm_features);
-		if (ret == 0)
-			cz_hwmgr->is_nb_dpm_enabled = true;
-	}
-
-	return ret;
-}
-
-static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
-{
-	bool disable_switch;
-	bool enable_low_mem_state;
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-	const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
-	const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
-
-	if (hw_data->sys_info.nb_dpm_enable) {
-		disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
-		enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
-
-		if (pnew_state->action == FORCE_HIGH)
-			cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
-		else if (pnew_state->action == CANCEL_FORCE_HIGH)
-			cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
-		else
-			cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
-	}
-	return 0;
-}
-
-static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-	int ret = 0;
-
-	cz_update_sclk_limit(hwmgr);
-	cz_set_deep_sleep_sclk_threshold(hwmgr);
-	cz_set_watermark_threshold(hwmgr);
-	ret = cz_enable_nb_dpm(hwmgr);
-	if (ret)
-		return ret;
-	cz_update_low_mem_pstate(hwmgr, input);
-
-	return 0;
-};
-
-
-static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-	int ret;
-
-	ret = cz_upload_pptable_to_smu(hwmgr);
-	if (ret)
-		return ret;
-	ret = cz_init_sclk_limit(hwmgr);
-	if (ret)
-		return ret;
-	ret = cz_init_uvd_limit(hwmgr);
-	if (ret)
-		return ret;
-	ret = cz_init_vce_limit(hwmgr);
-	if (ret)
-		return ret;
-	ret = cz_init_acp_limit(hwmgr);
-	if (ret)
-		return ret;
-
-	cz_init_power_gate_state(hwmgr);
-	cz_init_sclk_threshold(hwmgr);
-
-	return 0;
-}
-
-static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
-	hw_data->disp_clk_bypass_pending = false;
-	hw_data->disp_clk_bypass = false;
-}
-
-static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
-	hw_data->is_nb_dpm_enabled = false;
-}
-
-static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
-	hw_data->cc6_settings.cc6_setting_changed = false;
-	hw_data->cc6_settings.cpu_pstate_separation_time = 0;
-	hw_data->cc6_settings.cpu_cc6_disable = false;
-	hw_data->cc6_settings.cpu_pstate_disable = false;
-}
-
-static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
-{
-	cz_power_up_display_clock_sys_pll(hwmgr);
-	cz_clear_nb_dpm_flag(hwmgr);
-	cz_reset_cc6_data(hwmgr);
-	return 0;
-};
-
-static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
-				PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
-}
-
-static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
-}
-
-static int cz_start_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
-
-	return smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_EnableAllSmuFeatures,
-				SCLK_DPM_MASK);
-}
-
-static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-	int ret = 0;
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	unsigned long dpm_features = 0;
-
-	if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
-		dpm_features |= SCLK_DPM_MASK;
-		cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
-		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DisableAllSmuFeatures,
-					dpm_features);
-	}
-	return ret;
-}
-
-static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
-	cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMin,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_min_clk,
-				PPSMC_MSG_SetSclkSoftMin));
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMax,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_max_clk,
-				PPSMC_MSG_SetSclkSoftMax));
-
-	return 0;
-}
-
-static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->acp_boot_level = 0xff;
-}
-
-static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	cz_disable_nb_dpm(hwmgr);
-
-	cz_clear_voting_clients(hwmgr);
-	if (cz_stop_dpm(hwmgr))
-		return -EINVAL;
-
-	return 0;
-};
-
-static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	cz_program_voting_clients(hwmgr);
-	if (cz_start_dpm(hwmgr))
-		return -EINVAL;
-	cz_program_bootup_state(hwmgr);
-	cz_reset_acp_boot_level(hwmgr);
-
-	return 0;
-};
-
-static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-				struct pp_power_state  *prequest_ps,
-			const struct pp_power_state *pcurrent_ps)
-{
-	struct cz_power_state *cz_ps =
-				cast_PhwCzPowerState(&prequest_ps->hardware);
-
-	const struct cz_power_state *cz_current_ps =
-				cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
-
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct PP_Clocks clocks = {0, 0, 0, 0};
-	bool force_high;
-	uint32_t  num_of_active_displays = 0;
-	struct cgs_display_info info = {0};
-
-	cz_ps->need_dfs_bypass = true;
-
-	cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
-	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
-				hwmgr->display_config.min_mem_set_clock :
-				cz_hwmgr->sys_info.nbp_memory_clock[1];
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_of_active_displays = info.display_count;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
-
-	force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
-			|| (num_of_active_displays >= 3);
-
-	cz_ps->action = cz_current_ps->action;
-
-	if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-		cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
-	else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
-		cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
-	else if (!force_high && (cz_ps->action == FORCE_HIGH))
-		cz_ps->action = CANCEL_FORCE_HIGH;
-	else if (force_high && (cz_ps->action != FORCE_HIGH))
-		cz_ps->action = FORCE_HIGH;
-	else
-		cz_ps->action = DO_NOTHING;
-
-	return 0;
-}
-
-static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	struct cz_hwmgr *data;
-
-	data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
-
-	result = cz_initialize_dpm_defaults(hwmgr);
-	if (result != 0) {
-		pr_err("cz_initialize_dpm_defaults failed\n");
-		return result;
-	}
-
-	result = cz_get_system_info_data(hwmgr);
-	if (result != 0) {
-		pr_err("cz_get_system_info_data failed\n");
-		return result;
-	}
-
-	cz_construct_boot_state(hwmgr);
-
-	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  CZ_MAX_HARDWARE_POWERLEVELS;
-
-	return result;
-}
-
-static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr != NULL) {
-		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
-		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
-
-		kfree(hwmgr->backend);
-		hwmgr->backend = NULL;
-	}
-	return 0;
-}
-
-static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetSclkSoftMin,
-					cz_get_sclk_level(hwmgr,
-					cz_hwmgr->sclk_dpm.soft_max_clk,
-					PPSMC_MSG_SetSclkSoftMin));
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMax,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_max_clk,
-				PPSMC_MSG_SetSclkSoftMax));
-
-	return 0;
-}
-
-static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-	unsigned long clock = 0, level;
-
-	if (NULL == table || table->count <= 0)
-		return -EINVAL;
-
-	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
-	cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
-	hwmgr->pstate_sclk = table->entries[0].clk;
-	hwmgr->pstate_mclk = 0;
-
-	level = cz_get_max_sclk_level(hwmgr) - 1;
-
-	if (level < table->count)
-		clock = table->entries[level].clk;
-	else
-		clock = table->entries[table->count - 1].clk;
-
-	cz_hwmgr->sclk_dpm.soft_max_clk = clock;
-	cz_hwmgr->sclk_dpm.hard_max_clk = clock;
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMin,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_min_clk,
-				PPSMC_MSG_SetSclkSoftMin));
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMax,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_max_clk,
-				PPSMC_MSG_SetSclkSoftMax));
-
-	return 0;
-}
-
-static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSclkSoftMax,
-			cz_get_sclk_level(hwmgr,
-			cz_hwmgr->sclk_dpm.soft_min_clk,
-			PPSMC_MSG_SetSclkSoftMax));
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMin,
-				cz_get_sclk_level(hwmgr,
-				cz_hwmgr->sclk_dpm.soft_min_clk,
-				PPSMC_MSG_SetSclkSoftMin));
-
-	return 0;
-}
-
-static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
-				enum amd_dpm_forced_level level)
-{
-	int ret = 0;
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		ret = cz_phm_force_dpm_highest(hwmgr);
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		ret = cz_phm_force_dpm_lowest(hwmgr);
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		ret = cz_phm_unforce_dpm_levels(hwmgr);
-		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
-	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
-		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
-	return 0;
-}
-
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
-	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-		return smum_send_msg_to_smc_with_parameter(
-			hwmgr,
-			PPSMC_MSG_UVDPowerON,
-			PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
-	}
-
-	return 0;
-}
-
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_uvd_clock_voltage_dependency_table *ptable =
-		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
-	if (!bgate) {
-		/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
-		if (PP_CAP(PHM_PlatformCaps_StablePState) ||
-		    hwmgr->en_umd_pstate) {
-			cz_hwmgr->uvd_dpm.hard_min_clk =
-				   ptable->entries[ptable->count - 1].vclk;
-
-			smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetUvdHardMin,
-				cz_get_uvd_level(hwmgr,
-					cz_hwmgr->uvd_dpm.hard_min_clk,
-					PPSMC_MSG_SetUvdHardMin));
-
-			cz_enable_disable_uvd_dpm(hwmgr, true);
-		} else {
-			cz_enable_disable_uvd_dpm(hwmgr, true);
-		}
-	} else {
-		cz_enable_disable_uvd_dpm(hwmgr, false);
-	}
-
-	return 0;
-}
-
-int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_vce_clock_voltage_dependency_table *ptable =
-		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
-	if (PP_CAP(PHM_PlatformCaps_StablePState) ||
-	    hwmgr->en_umd_pstate) {
-		cz_hwmgr->vce_dpm.hard_min_clk =
-				  ptable->entries[ptable->count - 1].ecclk;
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetEclkHardMin,
-			cz_get_eclk_level(hwmgr,
-				cz_hwmgr->vce_dpm.hard_min_clk,
-				PPSMC_MSG_SetEclkHardMin));
-	} else {
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetEclkHardMin, 0);
-		/* disable ECLK DPM 0. Otherwise VCE could hang if
-		 * switching SCLK from DPM 0 to 6/7 */
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetEclkSoftMin, 1);
-	}
-	return 0;
-}
-
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
-	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
-		return smum_send_msg_to_smc(hwmgr,
-						     PPSMC_MSG_VCEPowerOFF);
-	return 0;
-}
-
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
-	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
-		return smum_send_msg_to_smc(hwmgr,
-						     PPSMC_MSG_VCEPowerON);
-	return 0;
-}
-
-static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	return cz_hwmgr->sys_info.bootup_uma_clock;
-}
-
-static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct cz_power_state  *cz_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	cz_ps = cast_PhwCzPowerState(&ps->hardware);
-
-	if (low)
-		return cz_ps->levels[0].engineClock;
-	else
-		return cz_ps->levels[cz_ps->level-1].engineClock;
-}
-
-static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-					struct pp_hw_power_state *hw_ps)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
-
-	cz_ps->level = 1;
-	cz_ps->nbps_flags = 0;
-	cz_ps->bapm_flags = 0;
-	cz_ps->levels[0] = cz_hwmgr->boot_power_level;
-
-	return 0;
-}
-
-static int cz_dpm_get_pp_table_entry_callback(
-						     struct pp_hwmgr *hwmgr,
-					   struct pp_hw_power_state *hw_ps,
-							  unsigned int index,
-						     const void *clock_info)
-{
-	struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
-
-	const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
-
-	struct phm_clock_voltage_dependency_table *table =
-				    hwmgr->dyn_state.vddc_dependency_on_sclk;
-	uint8_t clock_info_index = cz_clock_info->index;
-
-	if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
-		clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
-
-	cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
-	cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
-
-	cz_ps->level = index + 1;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		cz_ps->levels[index].dsDividerIndex = 5;
-		cz_ps->levels[index].ssDividerIndex = 5;
-	}
-
-	return 0;
-}
-
-static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	unsigned long ret = 0;
-
-	result = pp_tables_get_num_of_entries(hwmgr, &ret);
-
-	return result ? 0 : ret;
-}
-
-static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-		    unsigned long entry, struct pp_power_state *ps)
-{
-	int result;
-	struct cz_power_state *cz_ps;
-
-	ps->hardware.magic = PhwCz_Magic;
-
-	cz_ps = cast_PhwCzPowerState(&(ps->hardware));
-
-	result = pp_tables_get_entry(hwmgr, entry, ps,
-			cz_dpm_get_pp_table_entry_callback);
-
-	cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
-	cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
-
-	return result;
-}
-
-static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-	return sizeof(struct cz_power_state);
-}
-
-static void cz_hw_print_display_cfg(
-	const struct cc6_settings *cc6_settings)
-{
-	PP_DBG_LOG("New Display Configuration:\n");
-
-	PP_DBG_LOG("   cpu_cc6_disable: %d\n",
-			cc6_settings->cpu_cc6_disable);
-	PP_DBG_LOG("   cpu_pstate_disable: %d\n",
-			cc6_settings->cpu_pstate_disable);
-	PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
-			cc6_settings->nb_pstate_switch_disable);
-	PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
-			cc6_settings->cpu_pstate_separation_time);
-}
-
- static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-	uint32_t data = 0;
-
-	if (hw_data->cc6_settings.cc6_setting_changed) {
-
-		hw_data->cc6_settings.cc6_setting_changed = false;
-
-		cz_hw_print_display_cfg(&hw_data->cc6_settings);
-
-		data |= (hw_data->cc6_settings.cpu_pstate_separation_time
-			& PWRMGT_SEPARATION_TIME_MASK)
-			<< PWRMGT_SEPARATION_TIME_SHIFT;
-
-		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
-			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
-
-		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
-			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
-
-		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
-			data);
-
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-						PPSMC_MSG_SetDisplaySizePowerParams,
-						data);
-	}
-
-	return 0;
-}
-
-
-static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
-			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
-{
-	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
-	if (separation_time !=
-	    hw_data->cc6_settings.cpu_pstate_separation_time ||
-	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
-	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
-	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
-
-		hw_data->cc6_settings.cc6_setting_changed = true;
-
-		hw_data->cc6_settings.cpu_pstate_separation_time =
-			separation_time;
-		hw_data->cc6_settings.cpu_cc6_disable =
-			cc6_disable;
-		hw_data->cc6_settings.cpu_pstate_disable =
-			pstate_disable;
-		hw_data->cc6_settings.nb_pstate_switch_disable =
-			pstate_switch_disable;
-
-	}
-
-	return 0;
-}
-
-static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
-		struct amd_pp_simple_clock_info *info)
-{
-	uint32_t i;
-	const struct phm_clock_voltage_dependency_table *table =
-			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
-	const struct phm_clock_and_voltage_limits *limits =
-			&hwmgr->dyn_state.max_clock_voltage_on_ac;
-
-	info->engine_max_clock = limits->sclk;
-	info->memory_max_clock = limits->mclk;
-
-	for (i = table->count - 1; i > 0; i--) {
-		if (limits->vddc >= table->entries[i].v) {
-			info->level = table->entries[i].clk;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	switch (type) {
-	case PP_SCLK:
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMin,
-				mask);
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetSclkSoftMax,
-				mask);
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, char *buf)
-{
-	struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
-	struct phm_clock_voltage_dependency_table *sclk_table =
-			hwmgr->dyn_state.vddc_dependency_on_sclk;
-	int i, now, size = 0;
-
-	switch (type) {
-	case PP_SCLK:
-		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC,
-				ixTARGET_AND_CURRENT_PROFILE_INDEX),
-				TARGET_AND_CURRENT_PROFILE_INDEX,
-				CURR_SCLK_INDEX);
-
-		for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, sclk_table->entries[i].clk / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_MCLK:
-		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC,
-				ixTARGET_AND_CURRENT_PROFILE_INDEX),
-				TARGET_AND_CURRENT_PROFILE_INDEX,
-				CURR_MCLK_INDEX);
-
-		for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
-					(CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
-		break;
-	default:
-		break;
-	}
-	return size;
-}
-
-static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
-				PHM_PerformanceLevelDesignation designation, uint32_t index,
-				PHM_PerformanceLevel *level)
-{
-	const struct cz_power_state *ps;
-	struct cz_hwmgr *data;
-	uint32_t level_index;
-	uint32_t i;
-
-	if (level == NULL || hwmgr == NULL || state == NULL)
-		return -EINVAL;
-
-	data = (struct cz_hwmgr *)(hwmgr->backend);
-	ps = cast_const_PhwCzPowerState(state);
-
-	level_index = index > ps->level - 1 ? ps->level - 1 : index;
-	level->coreClock = ps->levels[level_index].engineClock;
-
-	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
-		for (i = 1; i < ps->level; i++) {
-			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
-				level->coreClock = ps->levels[i].engineClock;
-				break;
-			}
-		}
-	}
-
-	if (level_index == 0)
-		level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
-	else
-		level->memory_clock = data->sys_info.nbp_memory_clock[0];
-
-	level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
-	level->nonLocalMemoryFreq = 0;
-	level->nonLocalMemoryWidth = 0;
-
-	return 0;
-}
-
-static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
-	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
-{
-	const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
-
-	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
-	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
-
-	return 0;
-}
-
-static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
-						struct amd_pp_clocks *clocks)
-{
-	struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
-	int i;
-	struct phm_clock_voltage_dependency_table *table;
-
-	clocks->count = cz_get_max_sclk_level(hwmgr);
-	switch (type) {
-	case amd_pp_disp_clock:
-		for (i = 0; i < clocks->count; i++)
-			clocks->clock[i] = data->sys_info.display_clock[i];
-		break;
-	case amd_pp_sys_clock:
-		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
-		for (i = 0; i < clocks->count; i++)
-			clocks->clock[i] = table->entries[i].clk;
-		break;
-	case amd_pp_mem_clock:
-		clocks->count = CZ_NUM_NBPMEMORYCLOCK;
-		for (i = 0; i < clocks->count; i++)
-			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
-		break;
-	default:
-		return -1;
-	}
-
-	return 0;
-}
-
-static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
-{
-	struct phm_clock_voltage_dependency_table *table =
-					hwmgr->dyn_state.vddc_dependency_on_sclk;
-	unsigned long level;
-	const struct phm_clock_and_voltage_limits *limits =
-			&hwmgr->dyn_state.max_clock_voltage_on_ac;
-
-	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
-		return -EINVAL;
-
-	level = cz_get_max_sclk_level(hwmgr) - 1;
-
-	if (level < table->count)
-		clocks->engine_max_clock = table->entries[level].clk;
-	else
-		clocks->engine_max_clock = table->entries[table->count - 1].clk;
-
-	clocks->memory_max_clock = limits->mclk;
-
-	return 0;
-}
-
-static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-	int actual_temp = 0;
-	uint32_t val = cgs_read_ind_register(hwmgr->device,
-					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
-	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
-
-	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
-		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	else
-		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	return actual_temp;
-}
-
-static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
-			  void *value, int *size)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	struct phm_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-	struct phm_vce_clock_voltage_dependency_table *vce_table =
-		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
-		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
-	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
-					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
-	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
-	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
-	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
-	uint16_t vddnb, vddgfx;
-	int result;
-
-	/* size must be at least 4 bytes for all sensors */
-	if (*size < 4)
-		return -EINVAL;
-	*size = 4;
-
-	switch (idx) {
-	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		if (sclk_index < NUM_SCLK_LEVELS) {
-			sclk = table->entries[sclk_index].clk;
-			*((uint32_t *)value) = sclk;
-			return 0;
-		}
-		return -EINVAL;
-	case AMDGPU_PP_SENSOR_VDDNB:
-		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
-			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-		vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
-		*((uint32_t *)value) = vddnb;
-		return 0;
-	case AMDGPU_PP_SENSOR_VDDGFX:
-		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
-			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-		vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
-		*((uint32_t *)value) = vddgfx;
-		return 0;
-	case AMDGPU_PP_SENSOR_UVD_VCLK:
-		if (!cz_hwmgr->uvd_power_gated) {
-			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-				return -EINVAL;
-			} else {
-				vclk = uvd_table->entries[uvd_index].vclk;
-				*((uint32_t *)value) = vclk;
-				return 0;
-			}
-		}
-		*((uint32_t *)value) = 0;
-		return 0;
-	case AMDGPU_PP_SENSOR_UVD_DCLK:
-		if (!cz_hwmgr->uvd_power_gated) {
-			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-				return -EINVAL;
-			} else {
-				dclk = uvd_table->entries[uvd_index].dclk;
-				*((uint32_t *)value) = dclk;
-				return 0;
-			}
-		}
-		*((uint32_t *)value) = 0;
-		return 0;
-	case AMDGPU_PP_SENSOR_VCE_ECCLK:
-		if (!cz_hwmgr->vce_power_gated) {
-			if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-				return -EINVAL;
-			} else {
-				ecclk = vce_table->entries[vce_index].ecclk;
-				*((uint32_t *)value) = ecclk;
-				return 0;
-			}
-		}
-		*((uint32_t *)value) = 0;
-		return 0;
-	case AMDGPU_PP_SENSOR_GPU_LOAD:
-		result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
-		if (0 == result) {
-			activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
-			activity_percent = activity_percent > 100 ? 100 : activity_percent;
-		} else {
-			activity_percent = 50;
-		}
-		*((uint32_t *)value) = activity_percent;
-		return 0;
-	case AMDGPU_PP_SENSOR_UVD_POWER:
-		*((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1;
-		return 0;
-	case AMDGPU_PP_SENSOR_VCE_POWER:
-		*((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1;
-		return 0;
-	case AMDGPU_PP_SENSOR_GPU_TEMP:
-		*((uint32_t *)value) = cz_thermal_get_temperature(hwmgr);
-		return 0;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
-					uint32_t virtual_addr_low,
-					uint32_t virtual_addr_hi,
-					uint32_t mc_addr_low,
-					uint32_t mc_addr_hi,
-					uint32_t size)
-{
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DramAddrHiVirtual,
-					mc_addr_hi);
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DramAddrLoVirtual,
-					mc_addr_low);
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DramAddrHiPhysical,
-					virtual_addr_hi);
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DramAddrLoPhysical,
-					virtual_addr_low);
-
-	smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DramBufferSize,
-					size);
-	return 0;
-}
-
-static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
-		struct PP_TemperatureRange *thermal_data)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
-
-	thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
-			cz_hwmgr->sys_info.htc_hyst_lmt) *
-			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	return 0;
-}
-
-static const struct pp_hwmgr_func cz_hwmgr_funcs = {
-	.backend_init = cz_hwmgr_backend_init,
-	.backend_fini = cz_hwmgr_backend_fini,
-	.apply_state_adjust_rules = cz_apply_state_adjust_rules,
-	.force_dpm_level = cz_dpm_force_dpm_level,
-	.get_power_state_size = cz_get_power_state_size,
-	.powerdown_uvd = cz_dpm_powerdown_uvd,
-	.powergate_uvd = cz_dpm_powergate_uvd,
-	.powergate_vce = cz_dpm_powergate_vce,
-	.get_mclk = cz_dpm_get_mclk,
-	.get_sclk = cz_dpm_get_sclk,
-	.patch_boot_state = cz_dpm_patch_boot_state,
-	.get_pp_table_entry = cz_dpm_get_pp_table_entry,
-	.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
-	.set_cpu_power_state = cz_set_cpu_power_state,
-	.store_cc6_data = cz_store_cc6_data,
-	.force_clock_level = cz_force_clock_level,
-	.print_clock_levels = cz_print_clock_levels,
-	.get_dal_power_level = cz_get_dal_power_level,
-	.get_performance_level = cz_get_performance_level,
-	.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
-	.get_clock_by_type = cz_get_clock_by_type,
-	.get_max_high_clocks = cz_get_max_high_clocks,
-	.read_sensor = cz_read_sensor,
-	.power_off_asic = cz_power_off_asic,
-	.asic_setup = cz_setup_asic_task,
-	.dynamic_state_management_enable = cz_enable_dpm_tasks,
-	.power_state_set = cz_set_power_state_tasks,
-	.dynamic_state_management_disable = cz_disable_dpm_tasks,
-	.notify_cac_buffer_info = cz_notify_cac_buffer_info,
-	.get_thermal_temperature_range = cz_get_thermal_temperature_range,
-};
-
-int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
-{
-	hwmgr->hwmgr_func = &cz_hwmgr_funcs;
-	hwmgr->pptable_func = &pptable_funcs;
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
deleted file mode 100644
index b56720a..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _CZ_HWMGR_H_
-#define _CZ_HWMGR_H_
-
-#include "cgs_common.h"
-#include "ppatomctrl.h"
-
-#define CZ_NUM_NBPSTATES               4
-#define CZ_NUM_NBPMEMORYCLOCK          2
-#define MAX_DISPLAY_CLOCK_LEVEL        8
-#define CZ_MAX_HARDWARE_POWERLEVELS    8
-#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0   0x3FFFC102
-#define CZ_MIN_DEEP_SLEEP_SCLK         800
-
-/* Carrizo device IDs */
-#define DEVICE_ID_CZ_9870             0x9870
-#define DEVICE_ID_CZ_9874             0x9874
-#define DEVICE_ID_CZ_9875             0x9875
-#define DEVICE_ID_CZ_9876             0x9876
-#define DEVICE_ID_CZ_9877             0x9877
-
-#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value)                            \
-		cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
-
-struct cz_dpm_entry {
-	uint32_t soft_min_clk;
-	uint32_t hard_min_clk;
-	uint32_t soft_max_clk;
-	uint32_t hard_max_clk;
-};
-
-struct cz_sys_info {
-	uint32_t bootup_uma_clock;
-	uint32_t bootup_engine_clock;
-	uint32_t dentist_vco_freq;
-	uint32_t nb_dpm_enable;
-	uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK];
-	uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
-	uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES];
-	uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
-	uint16_t bootup_nb_voltage_index;
-	uint8_t htc_tmp_lmt;
-	uint8_t htc_hyst_lmt;
-	uint32_t system_config;
-	uint32_t uma_channel_number;
-};
-
-#define MAX_DISPLAYPHY_IDS			0x8
-#define DISPLAYPHY_LANEMASK			0xF
-#define UNKNOWN_TRANSMITTER_PHY_ID		(-1)
-
-#define DISPLAYPHY_PHYID_SHIFT			24
-#define DISPLAYPHY_LANESELECT_SHIFT		16
-
-#define DISPLAYPHY_RX_SELECT			0x1
-#define DISPLAYPHY_TX_SELECT			0x2
-#define DISPLAYPHY_CORE_SELECT			0x4
-
-#define DDI_POWERGATING_ARG(phyID, lanemask, rx, tx, core) \
-		(((uint32_t)(phyID))<<DISPLAYPHY_PHYID_SHIFT | \
-		((uint32_t)(lanemask))<<DISPLAYPHY_LANESELECT_SHIFT | \
-		((rx) ? DISPLAYPHY_RX_SELECT : 0) | \
-		((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
-		((core) ? DISPLAYPHY_CORE_SELECT : 0))
-
-struct cz_display_phy_info_entry {
-	uint8_t phy_present;
-	uint8_t active_lane_mapping;
-	uint8_t display_config_type;
-	uint8_t active_number_of_lanes;
-};
-
-#define CZ_MAX_DISPLAYPHY_IDS			10
-
-struct cz_display_phy_info {
-	bool display_phy_access_initialized;
-	struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS];
-};
-
-struct cz_power_level {
-	uint32_t engineClock;
-	uint8_t vddcIndex;
-	uint8_t dsDividerIndex;
-	uint8_t ssDividerIndex;
-	uint8_t allowGnbSlow;
-	uint8_t forceNBPstate;
-	uint8_t display_wm;
-	uint8_t vce_wm;
-	uint8_t numSIMDToPowerDown;
-	uint8_t hysteresis_up;
-	uint8_t rsv[3];
-};
-
-struct cz_uvd_clocks {
-	uint32_t vclk;
-	uint32_t dclk;
-	uint32_t vclk_low_divider;
-	uint32_t vclk_high_divider;
-	uint32_t dclk_low_divider;
-	uint32_t dclk_high_divider;
-};
-
-enum cz_pstate_previous_action {
-	DO_NOTHING = 1,
-	FORCE_HIGH,
-	CANCEL_FORCE_HIGH
-};
-
-struct pp_disable_nb_ps_flags {
-	union {
-		struct {
-			uint32_t entry : 1;
-			uint32_t display : 1;
-			uint32_t driver: 1;
-			uint32_t vce : 1;
-			uint32_t uvd : 1;
-			uint32_t acp : 1;
-			uint32_t reserved: 26;
-		} bits;
-		uint32_t u32All;
-	};
-};
-
-struct cz_power_state {
-	unsigned int magic;
-	uint32_t level;
-	struct cz_uvd_clocks uvd_clocks;
-	uint32_t evclk;
-	uint32_t ecclk;
-	uint32_t samclk;
-	uint32_t acpclk;
-	bool need_dfs_bypass;
-	uint32_t nbps_flags;
-	uint32_t bapm_flags;
-	uint8_t dpm_0_pg_nb_ps_low;
-	uint8_t dpm_0_pg_nb_ps_high;
-	uint8_t dpm_x_nb_ps_low;
-	uint8_t dpm_x_nb_ps_high;
-	enum cz_pstate_previous_action action;
-	struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS];
-	struct pp_disable_nb_ps_flags disable_nb_ps_flag;
-};
-
-#define DPMFlags_SCLK_Enabled			0x00000001
-#define DPMFlags_UVD_Enabled			0x00000002
-#define DPMFlags_VCE_Enabled			0x00000004
-#define DPMFlags_ACP_Enabled			0x00000008
-#define DPMFlags_ForceHighestValid		0x40000000
-#define DPMFlags_Debug				0x80000000
-
-#define SMU_EnabledFeatureScoreboard_AcpDpmOn   0x00000001 /* bit 0 */
-#define SMU_EnabledFeatureScoreboard_UvdDpmOn   0x00800000 /* bit 23 */
-#define SMU_EnabledFeatureScoreboard_VceDpmOn   0x01000000 /* bit 24 */
-
-struct cc6_settings {
-	bool cc6_setting_changed;
-	bool nb_pstate_switch_disable;/* controls NB PState switch */
-	bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
-	bool cpu_pstate_disable;
-	uint32_t cpu_pstate_separation_time;
-};
-
-struct cz_hwmgr {
-	uint32_t dpm_interval;
-
-	uint32_t voltage_drop_threshold;
-
-	uint32_t voting_rights_clients;
-
-	uint32_t disable_driver_thermal_policy;
-
-	uint32_t static_screen_threshold;
-
-	uint32_t gfx_power_gating_threshold;
-
-	uint32_t activity_hysteresis;
-	uint32_t bootup_sclk_divider;
-	uint32_t gfx_ramp_step;
-	uint32_t gfx_ramp_delay; /* in micro-seconds */
-
-	uint32_t thermal_auto_throttling_treshold;
-
-	struct cz_sys_info sys_info;
-
-	struct cz_power_level boot_power_level;
-	struct cz_power_state *cz_current_ps;
-	struct cz_power_state *cz_requested_ps;
-
-	uint32_t mgcg_cgtt_local0;
-	uint32_t mgcg_cgtt_local1;
-
-	uint32_t tdr_clock; /* in 10khz unit */
-
-	uint32_t ddi_power_gating_disabled;
-	uint32_t disable_gfx_power_gating_in_uvd;
-	uint32_t disable_nb_ps3_in_battery;
-
-	uint32_t lock_nb_ps_in_uvd_play_back;
-
-	struct cz_display_phy_info display_phy_info;
-	uint32_t vce_slow_sclk_threshold; /* default 200mhz */
-	uint32_t dce_slow_sclk_threshold; /* default 300mhz */
-	uint32_t min_sclk_did;  /* minimum sclk divider */
-
-	bool disp_clk_bypass;
-	bool disp_clk_bypass_pending;
-	uint32_t bapm_enabled;
-	uint32_t clock_slow_down_freq;
-	uint32_t skip_clock_slow_down;
-	uint32_t enable_nb_ps_policy;
-	uint32_t voltage_drop_in_dce_power_gating;
-	uint32_t uvd_dpm_interval;
-	uint32_t override_dynamic_mgpg;
-	uint32_t lclk_deep_enabled;
-
-	uint32_t uvd_performance;
-
-	bool video_start;
-	bool battery_state;
-	uint32_t lowest_valid;
-	uint32_t highest_valid;
-	uint32_t high_voltage_threshold;
-	uint32_t is_nb_dpm_enabled;
-	struct cc6_settings cc6_settings;
-	uint32_t is_voltage_island_enabled;
-
-	bool pgacpinit;
-
-	uint8_t disp_config;
-
-	/* PowerTune */
-	uint32_t power_containment_features;
-	bool cac_enabled;
-	bool disable_uvd_power_tune_feature;
-	bool enable_ba_pm_feature;
-	bool enable_tdc_limit_feature;
-
-	uint32_t sram_end;
-	uint32_t dpm_table_start;
-	uint32_t soft_regs_start;
-
-	uint8_t uvd_level_count;
-	uint8_t vce_level_count;
-
-	uint8_t acp_level_count;
-	uint8_t samu_level_count;
-	uint32_t fps_high_threshold;
-	uint32_t fps_low_threshold;
-
-	uint32_t dpm_flags;
-	struct cz_dpm_entry sclk_dpm;
-	struct cz_dpm_entry uvd_dpm;
-	struct cz_dpm_entry vce_dpm;
-	struct cz_dpm_entry acp_dpm;
-
-	uint8_t uvd_boot_level;
-	uint8_t vce_boot_level;
-	uint8_t acp_boot_level;
-	uint8_t samu_boot_level;
-	uint8_t uvd_interval;
-	uint8_t vce_interval;
-	uint8_t acp_interval;
-	uint8_t samu_interval;
-
-	uint8_t graphics_interval;
-	uint8_t graphics_therm_throttle_enable;
-	uint8_t graphics_voltage_change_enable;
-
-	uint8_t graphics_clk_slow_enable;
-	uint8_t graphics_clk_slow_divider;
-
-	uint32_t display_cac;
-	uint32_t low_sclk_interrupt_threshold;
-
-	uint32_t dram_log_addr_h;
-	uint32_t dram_log_addr_l;
-	uint32_t dram_log_phy_addr_h;
-	uint32_t dram_log_phy_addr_l;
-	uint32_t dram_log_buff_size;
-
-	bool uvd_power_gated;
-	bool vce_power_gated;
-	bool samu_power_gated;
-	bool acp_power_gated;
-	bool acp_power_up_no_dsp;
-	uint32_t active_process_mask;
-
-	uint32_t max_sclk_level;
-	uint32_t num_of_clk_entries;
-};
-
-struct pp_hwmgr;
-
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
-#endif /* _CZ_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.c
new file mode 100644
index 0000000..c217735
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "smu8_clockpowergating.h"
+#include "cz_ppsmc.h"
+
+/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
+    0    GFX0L (3:0),                  (27:24),
+    1    GFX0H (7:4),                  (31:28),
+    2    GFX1L (3:0),                  (19:16),
+    3    GFX1H (7:4),                  (23:20),
+    4    DDIL   (3:0),                   (11: 8),
+    5    DDIH  (7:4),                   (15:12),
+    6    DDI2L (3:0),                   ( 3: 0),
+    7    DDI2H (7:4),                   ( 7: 4),
+*/
+#define DDI_PHY_GEN_STATUS_VAL(phyID)   (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
+#define IS_PHY_ID_USED_BY_PLL(PhyID)    (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
+
+
+int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
+{
+	int ret = 0;
+
+	switch (block) {
+	case PHM_AsicBlock_UVD_MVC:
+	case PHM_AsicBlock_UVD:
+	case PHM_AsicBlock_UVD_HD:
+	case PHM_AsicBlock_UVD_SD:
+		if (gating == PHM_ClockGateSetting_StaticOff)
+			ret = cz_dpm_powerdown_uvd(hwmgr);
+		else
+			ret = cz_dpm_powerup_uvd(hwmgr);
+		break;
+	case PHM_AsicBlock_GFX:
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+
+bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
+{
+	return true;
+}
+
+
+int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return 0;
+}
+
+int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
+{
+	/* TODO */
+	return 0;
+}
+
+int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
+{
+	/* TODO */
+	return 0;
+}
+
+int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
+{
+	/* TODO */
+	return 0;
+}
+
+int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
+{
+	/* TODO */
+	return 0;
+}
+
+int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
+{
+	/* TODO */
+	return 0;
+}
+
+int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	uint32_t dpm_features = 0;
+
+	if (enable &&
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				  PHM_PlatformCaps_UVDDPM)) {
+		cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
+		dpm_features |= UVD_DPM_MASK;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+	} else {
+		dpm_features |= UVD_DPM_MASK;
+		cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+	}
+	return 0;
+}
+
+int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	uint32_t dpm_features = 0;
+
+	if (enable && phm_cap_enabled(
+				hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_VCEDPM)) {
+		cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
+		dpm_features |= VCE_DPM_MASK;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+	} else {
+		dpm_features |= VCE_DPM_MASK;
+		cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+	}
+
+	return 0;
+}
+
+
+void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->uvd_power_gated = bgate;
+
+	if (bgate) {
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_GATE);
+		cz_dpm_update_uvd_dpm(hwmgr, true);
+		cz_dpm_powerdown_uvd(hwmgr);
+	} else {
+		cz_dpm_powerup_uvd(hwmgr);
+		cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_UNGATE);
+		cz_dpm_update_uvd_dpm(hwmgr, false);
+	}
+
+}
+
+void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	if (bgate) {
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_GATE);
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_GATE);
+		cz_enable_disable_vce_dpm(hwmgr, false);
+		cz_dpm_powerdown_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = true;
+	} else {
+		cz_dpm_powerup_vce(hwmgr);
+		cz_hwmgr->vce_power_gated = false;
+		cgs_set_clockgating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(
+					hwmgr->device,
+					AMD_IP_BLOCK_TYPE_VCE,
+					AMD_PG_STATE_UNGATE);
+		cz_dpm_update_vce_dpm(hwmgr);
+		cz_enable_disable_vce_dpm(hwmgr, true);
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.h
new file mode 100644
index 0000000..0981eeb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_clockpowergating.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _CZ_CLOCK_POWER_GATING_H_
+#define _CZ_CLOCK_POWER_GATING_H_
+
+#include "smu8_hwmgr.h"
+#include "pp_asicblocks.h"
+
+extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
+extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
+extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
+#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
new file mode 100644
index 0000000..1a03d71
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -0,0 +1,1886 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "atom-types.h"
+#include "atombios.h"
+#include "processpptables.h"
+#include "cgs_common.h"
+#include "smu/smu_8_0_d.h"
+#include "smu8_fusion.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "smumgr.h"
+#include "hwmgr.h"
+#include "hardwaremanager.h"
+#include "cz_ppsmc.h"
+#include "smu8_hwmgr.h"
+#include "power_state.h"
+#include "smu8_clockpowergating.h"
+#include "pp_thermal.h"
+
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
+static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
+
+static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
+{
+	if (PhwCz_Magic != hw_ps->magic)
+		return NULL;
+
+	return (struct cz_power_state *)hw_ps;
+}
+
+static const struct cz_power_state *cast_const_PhwCzPowerState(
+				const struct pp_hw_power_state *hw_ps)
+{
+	if (PhwCz_Magic != hw_ps->magic)
+		return NULL;
+
+	return (struct cz_power_state *)hw_ps;
+}
+
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+					uint32_t clock, uint32_t msg)
+{
+	int i = 0;
+	struct phm_vce_clock_voltage_dependency_table *ptable =
+		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+	switch (msg) {
+	case PPSMC_MSG_SetEclkSoftMin:
+	case PPSMC_MSG_SetEclkHardMin:
+		for (i = 0; i < (int)ptable->count; i++) {
+			if (clock <= ptable->entries[i].ecclk)
+				break;
+		}
+		break;
+
+	case PPSMC_MSG_SetEclkSoftMax:
+	case PPSMC_MSG_SetEclkHardMax:
+		for (i = ptable->count - 1; i >= 0; i--) {
+			if (clock >= ptable->entries[i].ecclk)
+				break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return i;
+}
+
+static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
+				uint32_t clock, uint32_t msg)
+{
+	int i = 0;
+	struct phm_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+	switch (msg) {
+	case PPSMC_MSG_SetSclkSoftMin:
+	case PPSMC_MSG_SetSclkHardMin:
+		for (i = 0; i < (int)table->count; i++) {
+			if (clock <= table->entries[i].clk)
+				break;
+		}
+		break;
+
+	case PPSMC_MSG_SetSclkSoftMax:
+	case PPSMC_MSG_SetSclkHardMax:
+		for (i = table->count - 1; i >= 0; i--) {
+			if (clock >= table->entries[i].clk)
+				break;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return i;
+}
+
+static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
+					uint32_t clock, uint32_t msg)
+{
+	int i = 0;
+	struct phm_uvd_clock_voltage_dependency_table *ptable =
+		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+	switch (msg) {
+	case PPSMC_MSG_SetUvdSoftMin:
+	case PPSMC_MSG_SetUvdHardMin:
+		for (i = 0; i < (int)ptable->count; i++) {
+			if (clock <= ptable->entries[i].vclk)
+				break;
+		}
+		break;
+
+	case PPSMC_MSG_SetUvdSoftMax:
+	case PPSMC_MSG_SetUvdHardMax:
+		for (i = ptable->count - 1; i >= 0; i--) {
+			if (clock >= ptable->entries[i].vclk)
+				break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return i;
+}
+
+static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	if (cz_hwmgr->max_sclk_level == 0) {
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
+		cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
+	}
+
+	return cz_hwmgr->max_sclk_level;
+}
+
+static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	cz_hwmgr->gfx_ramp_step = 256*25/100;
+	cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
+
+	cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
+	cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
+	cz_hwmgr->clock_slow_down_freq = 25000;
+	cz_hwmgr->skip_clock_slow_down = 1;
+	cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
+	cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
+	cz_hwmgr->voting_rights_clients = 0x00C00033;
+	cz_hwmgr->static_screen_threshold = 8;
+	cz_hwmgr->ddi_power_gating_disabled = 0;
+	cz_hwmgr->bapm_enabled = 1;
+	cz_hwmgr->voltage_drop_threshold = 0;
+	cz_hwmgr->gfx_power_gating_threshold = 500;
+	cz_hwmgr->vce_slow_sclk_threshold = 20000;
+	cz_hwmgr->dce_slow_sclk_threshold = 30000;
+	cz_hwmgr->disable_driver_thermal_policy = 1;
+	cz_hwmgr->disable_nb_ps3_in_battery = 0;
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+							PHM_PlatformCaps_ABM);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_NonABMSupportInPPLib);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_DynamicM3Arbiter);
+
+	cz_hwmgr->override_dynamic_mgpg = 1;
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				  PHM_PlatformCaps_DynamicPatchPowerState);
+
+	cz_hwmgr->thermal_auto_throttling_treshold = 0;
+	cz_hwmgr->tdr_clock = 0;
+	cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_DynamicUVDState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_VCEDPM);
+
+	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
+	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
+	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
+	cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				   PHM_PlatformCaps_DisableVoltageIsland);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_UVDPowerGating);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			      PHM_PlatformCaps_VCEPowerGating);
+
+
+	return 0;
+}
+
+static uint32_t cz_convert_8Bit_index_to_voltage(
+			struct pp_hwmgr *hwmgr, uint16_t voltage)
+{
+	return 6200 - (voltage * 25);
+}
+
+static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+			struct phm_clock_and_voltage_limits *table)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
+	struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
+	struct phm_clock_voltage_dependency_table *dep_table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+	if (dep_table->count > 0) {
+		table->sclk = dep_table->entries[dep_table->count-1].clk;
+		table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
+		   (uint16_t)dep_table->entries[dep_table->count-1].v);
+	}
+	table->mclk = sys_info->nbp_memory_clock[0];
+	return 0;
+}
+
+static int cz_init_dynamic_state_adjustment_rule_settings(
+			struct pp_hwmgr *hwmgr,
+			ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
+{
+	uint32_t table_size =
+		sizeof(struct phm_clock_voltage_dependency_table) +
+		(7 * sizeof(struct phm_clock_voltage_dependency_record));
+
+	struct phm_clock_voltage_dependency_table *table_clk_vlt =
+					kzalloc(table_size, GFP_KERNEL);
+
+	if (NULL == table_clk_vlt) {
+		pr_err("Can not allocate memory!\n");
+		return -ENOMEM;
+	}
+
+	table_clk_vlt->count = 8;
+	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
+	table_clk_vlt->entries[0].v = 0;
+	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
+	table_clk_vlt->entries[1].v = 1;
+	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
+	table_clk_vlt->entries[2].v = 2;
+	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
+	table_clk_vlt->entries[3].v = 3;
+	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
+	table_clk_vlt->entries[4].v = 4;
+	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
+	table_clk_vlt->entries[5].v = 5;
+	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
+	table_clk_vlt->entries[6].v = 6;
+	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
+	table_clk_vlt->entries[7].v = 7;
+	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+
+	return 0;
+}
+
+static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
+	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
+	uint32_t i;
+	int result = 0;
+	uint8_t frev, crev;
+	uint16_t size;
+
+	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
+			hwmgr->device,
+			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
+			&size, &frev, &crev);
+
+	if (crev != 9) {
+		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	if (info == NULL) {
+		pr_err("Could not retrieve the Integrated System Info Table!\n");
+		return -EINVAL;
+	}
+
+	cz_hwmgr->sys_info.bootup_uma_clock =
+				   le32_to_cpu(info->ulBootUpUMAClock);
+
+	cz_hwmgr->sys_info.bootup_engine_clock =
+				le32_to_cpu(info->ulBootUpEngineClock);
+
+	cz_hwmgr->sys_info.dentist_vco_freq =
+				   le32_to_cpu(info->ulDentistVCOFreq);
+
+	cz_hwmgr->sys_info.system_config =
+				     le32_to_cpu(info->ulSystemConfig);
+
+	cz_hwmgr->sys_info.bootup_nb_voltage_index =
+				  le16_to_cpu(info->usBootUpNBVoltage);
+
+	cz_hwmgr->sys_info.htc_hyst_lmt =
+			(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
+
+	cz_hwmgr->sys_info.htc_tmp_lmt =
+			(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
+
+	if (cz_hwmgr->sys_info.htc_tmp_lmt <=
+			cz_hwmgr->sys_info.htc_hyst_lmt) {
+		pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
+		return -EINVAL;
+	}
+
+	cz_hwmgr->sys_info.nb_dpm_enable =
+				cz_hwmgr->enable_nb_ps_policy &&
+				(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
+
+	for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
+		if (i < CZ_NUM_NBPMEMORYCLOCK) {
+			cz_hwmgr->sys_info.nbp_memory_clock[i] =
+			  le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
+		}
+		cz_hwmgr->sys_info.nbp_n_clock[i] =
+			    le32_to_cpu(info->ulNbpStateNClkFreq[i]);
+	}
+
+	for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
+		cz_hwmgr->sys_info.display_clock[i] =
+					le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
+	}
+
+	/* Here use 4 levels, make sure not exceed */
+	for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
+		cz_hwmgr->sys_info.nbp_voltage_index[i] =
+			     le16_to_cpu(info->usNBPStateVoltage[i]);
+	}
+
+	if (!cz_hwmgr->sys_info.nb_dpm_enable) {
+		for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
+			if (i < CZ_NUM_NBPMEMORYCLOCK) {
+				cz_hwmgr->sys_info.nbp_memory_clock[i] =
+				    cz_hwmgr->sys_info.nbp_memory_clock[0];
+			}
+			cz_hwmgr->sys_info.nbp_n_clock[i] =
+				    cz_hwmgr->sys_info.nbp_n_clock[0];
+			cz_hwmgr->sys_info.nbp_voltage_index[i] =
+				    cz_hwmgr->sys_info.nbp_voltage_index[0];
+		}
+	}
+
+	if (le32_to_cpu(info->ulGPUCapInfo) &
+		SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_EnableDFSBypass);
+	}
+
+	cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
+
+	cz_construct_max_power_limits_table (hwmgr,
+				    &hwmgr->dyn_state.max_clock_voltage_on_ac);
+
+	cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
+				    &info->sDISPCLK_Voltage[0]);
+
+	return result;
+}
+
+static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->boot_power_level.engineClock =
+				cz_hwmgr->sys_info.bootup_engine_clock;
+
+	cz_hwmgr->boot_power_level.vddcIndex =
+			(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
+
+	cz_hwmgr->boot_power_level.dsDividerIndex = 0;
+	cz_hwmgr->boot_power_level.ssDividerIndex = 0;
+	cz_hwmgr->boot_power_level.allowGnbSlow = 1;
+	cz_hwmgr->boot_power_level.forceNBPstate = 0;
+	cz_hwmgr->boot_power_level.hysteresis_up = 0;
+	cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
+	cz_hwmgr->boot_power_level.display_wm = 0;
+	cz_hwmgr->boot_power_level.vce_wm = 0;
+
+	return 0;
+}
+
+static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
+{
+	struct SMU8_Fusion_ClkTable *clock_table;
+	int ret;
+	uint32_t i;
+	void *table = NULL;
+	pp_atomctrl_clock_dividers_kong dividers;
+
+	struct phm_clock_voltage_dependency_table *vddc_table =
+		hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
+		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
+	struct phm_acp_clock_voltage_dependency_table *acp_table =
+		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+	struct phm_vce_clock_voltage_dependency_table *vce_table =
+		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+	if (!hwmgr->need_pp_table_upload)
+		return 0;
+
+	ret = smum_download_powerplay_table(hwmgr, &table);
+
+	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
+			    "Fail to get clock table from SMU!", return -EINVAL;);
+
+	clock_table = (struct SMU8_Fusion_ClkTable *)table;
+
+	/* patch clock table */
+	PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+			    "Dependency table entry exceeds max limit!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+			    "Dependency table entry exceeds max limit!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+			    "Dependency table entry exceeds max limit!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+			    "Dependency table entry exceeds max limit!", return -EINVAL;);
+	PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+			    "Dependency table entry exceeds max limit!", return -EINVAL;);
+
+	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
+
+		/* vddc_sclk */
+		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
+			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
+		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
+
+		atomctrl_get_engine_pll_dividers_kong(hwmgr,
+						      clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
+						      &dividers);
+
+		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
+			(uint8_t)dividers.pll_post_divider;
+
+		/* vddgfx_sclk */
+		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
+
+		/* acp breakdown */
+		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
+		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
+
+		atomctrl_get_engine_pll_dividers_kong(hwmgr,
+						      clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
+						      &dividers);
+
+		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
+			(uint8_t)dividers.pll_post_divider;
+
+
+		/* uvd breakdown */
+		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
+
+		atomctrl_get_engine_pll_dividers_kong(hwmgr,
+						      clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
+						      &dividers);
+
+		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
+			(uint8_t)dividers.pll_post_divider;
+
+		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
+
+		atomctrl_get_engine_pll_dividers_kong(hwmgr,
+						      clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
+						      &dividers);
+
+		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
+			(uint8_t)dividers.pll_post_divider;
+
+		/* vce breakdown */
+		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
+		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
+
+
+		atomctrl_get_engine_pll_dividers_kong(hwmgr,
+						      clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
+						      &dividers);
+
+		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
+			(uint8_t)dividers.pll_post_divider;
+
+	}
+	ret = smum_upload_powerplay_table(hwmgr);
+
+	return ret;
+}
+
+static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *table =
+					hwmgr->dyn_state.vddc_dependency_on_sclk;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
+	cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		clock = table->entries[level].clk;
+	else
+		clock = table->entries[table->count - 1].clk;
+
+	cz_hwmgr->sclk_dpm.soft_max_clk = clock;
+	cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+
+	return 0;
+}
+
+static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_uvd_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->uvd_dpm.soft_min_clk = 0;
+	cz_hwmgr->uvd_dpm.hard_min_clk = 0;
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
+	level = smum_get_argument(hwmgr);
+
+	if (level < table->count)
+		clock = table->entries[level].vclk;
+	else
+		clock = table->entries[table->count - 1].vclk;
+
+	cz_hwmgr->uvd_dpm.soft_max_clk = clock;
+	cz_hwmgr->uvd_dpm.hard_max_clk = clock;
+
+	return 0;
+}
+
+static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_vce_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->vce_dpm.soft_min_clk = 0;
+	cz_hwmgr->vce_dpm.hard_min_clk = 0;
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
+	level = smum_get_argument(hwmgr);
+
+	if (level < table->count)
+		clock = table->entries[level].ecclk;
+	else
+		clock = table->entries[table->count - 1].ecclk;
+
+	cz_hwmgr->vce_dpm.soft_max_clk = clock;
+	cz_hwmgr->vce_dpm.hard_max_clk = clock;
+
+	return 0;
+}
+
+static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_acp_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->acp_dpm.soft_min_clk = 0;
+	cz_hwmgr->acp_dpm.hard_min_clk = 0;
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
+	level = smum_get_argument(hwmgr);
+
+	if (level < table->count)
+		clock = table->entries[level].acpclk;
+	else
+		clock = table->entries[table->count - 1].acpclk;
+
+	cz_hwmgr->acp_dpm.soft_max_clk = clock;
+	cz_hwmgr->acp_dpm.hard_max_clk = clock;
+	return 0;
+}
+
+static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->uvd_power_gated = false;
+	cz_hwmgr->vce_power_gated = false;
+	cz_hwmgr->samu_power_gated = false;
+	cz_hwmgr->acp_power_gated = false;
+	cz_hwmgr->pgacpinit = true;
+}
+
+static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->low_sclk_interrupt_threshold = 0;
+}
+
+static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *table =
+					hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+	unsigned long clock = 0;
+	unsigned long level;
+	unsigned long stable_pstate_sclk;
+	unsigned long percentage;
+
+	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[level].clk;
+	else
+		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
+
+	clock = hwmgr->display_config.min_core_set_clock;
+	if (clock == 0)
+		pr_debug("min_core_set_clock not set\n");
+
+	if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
+		cz_hwmgr->sclk_dpm.hard_min_clk = clock;
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSclkHardMin,
+						 cz_get_sclk_level(hwmgr,
+					cz_hwmgr->sclk_dpm.hard_min_clk,
+					     PPSMC_MSG_SetSclkHardMin));
+	}
+
+	clock = cz_hwmgr->sclk_dpm.soft_min_clk;
+
+	/* update minimum clocks for Stable P-State feature */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				     PHM_PlatformCaps_StablePState)) {
+		percentage = 75;
+		/*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table  */
+		stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
+					percentage) / 100;
+
+		if (clock < stable_pstate_sclk)
+			clock = stable_pstate_sclk;
+	}
+
+	if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
+		cz_hwmgr->sclk_dpm.soft_min_clk = clock;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSclkSoftMin,
+						cz_get_sclk_level(hwmgr,
+					cz_hwmgr->sclk_dpm.soft_min_clk,
+					     PPSMC_MSG_SetSclkSoftMin));
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_StablePState) &&
+			 cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
+		cz_hwmgr->sclk_dpm.soft_max_clk = clock;
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSclkSoftMax,
+						cz_get_sclk_level(hwmgr,
+					cz_hwmgr->sclk_dpm.soft_max_clk,
+					PPSMC_MSG_SetSclkSoftMax));
+	}
+
+	return 0;
+}
+
+static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_SclkDeepSleep)) {
+		uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
+		if (clks == 0)
+			clks = CZ_MIN_DEEP_SLEEP_SCLK;
+
+		PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetMinDeepSleepSclk,
+				clks);
+	}
+
+	return 0;
+}
+
+static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr =
+				  (struct cz_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetWatermarkFrequency,
+					cz_hwmgr->sclk_dpm.soft_max_clk);
+
+	return 0;
+}
+
+static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+	if (hw_data->is_nb_dpm_enabled) {
+		if (enable) {
+			PP_DBG_LOG("enable Low Memory PState.\n");
+
+			return smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_EnableLowMemoryPstate,
+						(lock ? 1 : 0));
+		} else {
+			PP_DBG_LOG("disable Low Memory PState.\n");
+
+			return smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_DisableLowMemoryPstate,
+						(lock ? 1 : 0));
+		}
+	}
+
+	return 0;
+}
+
+static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	unsigned long dpm_features = 0;
+
+	if (cz_hwmgr->is_nb_dpm_enabled) {
+		cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
+		dpm_features |= NB_DPM_MASK;
+		ret = smum_send_msg_to_smc_with_parameter(
+							  hwmgr,
+							  PPSMC_MSG_DisableAllSmuFeatures,
+							  dpm_features);
+		if (ret == 0)
+			cz_hwmgr->is_nb_dpm_enabled = false;
+	}
+
+	return ret;
+}
+
+static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	unsigned long dpm_features = 0;
+
+	if (!cz_hwmgr->is_nb_dpm_enabled) {
+		PP_DBG_LOG("enabling ALL SMU features.\n");
+		dpm_features |= NB_DPM_MASK;
+		ret = smum_send_msg_to_smc_with_parameter(
+							  hwmgr,
+							  PPSMC_MSG_EnableAllSmuFeatures,
+							  dpm_features);
+		if (ret == 0)
+			cz_hwmgr->is_nb_dpm_enabled = true;
+	}
+
+	return ret;
+}
+
+static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
+{
+	bool disable_switch;
+	bool enable_low_mem_state;
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+	const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
+	const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
+
+	if (hw_data->sys_info.nb_dpm_enable) {
+		disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
+		enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
+
+		if (pnew_state->action == FORCE_HIGH)
+			cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
+		else if (pnew_state->action == CANCEL_FORCE_HIGH)
+			cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
+		else
+			cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
+	}
+	return 0;
+}
+
+static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int ret = 0;
+
+	cz_update_sclk_limit(hwmgr);
+	cz_set_deep_sleep_sclk_threshold(hwmgr);
+	cz_set_watermark_threshold(hwmgr);
+	ret = cz_enable_nb_dpm(hwmgr);
+	if (ret)
+		return ret;
+	cz_update_low_mem_pstate(hwmgr, input);
+
+	return 0;
+}
+
+
+static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+	int ret;
+
+	ret = cz_upload_pptable_to_smu(hwmgr);
+	if (ret)
+		return ret;
+	ret = cz_init_sclk_limit(hwmgr);
+	if (ret)
+		return ret;
+	ret = cz_init_uvd_limit(hwmgr);
+	if (ret)
+		return ret;
+	ret = cz_init_vce_limit(hwmgr);
+	if (ret)
+		return ret;
+	ret = cz_init_acp_limit(hwmgr);
+	if (ret)
+		return ret;
+
+	cz_init_power_gate_state(hwmgr);
+	cz_init_sclk_threshold(hwmgr);
+
+	return 0;
+}
+
+static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+	hw_data->disp_clk_bypass_pending = false;
+	hw_data->disp_clk_bypass = false;
+}
+
+static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+	hw_data->is_nb_dpm_enabled = false;
+}
+
+static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+	hw_data->cc6_settings.cc6_setting_changed = false;
+	hw_data->cc6_settings.cpu_pstate_separation_time = 0;
+	hw_data->cc6_settings.cpu_cc6_disable = false;
+	hw_data->cc6_settings.cpu_pstate_disable = false;
+}
+
+static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+	cz_power_up_display_clock_sys_pll(hwmgr);
+	cz_clear_nb_dpm_flag(hwmgr);
+	cz_reset_cc6_data(hwmgr);
+	return 0;
+}
+
+static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
+				PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
+}
+
+static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_EnableAllSmuFeatures,
+				SCLK_DPM_MASK);
+}
+
+static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	unsigned long dpm_features = 0;
+
+	if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+		dpm_features |= SCLK_DPM_MASK;
+		cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DisableAllSmuFeatures,
+					dpm_features);
+	}
+	return ret;
+}
+
+static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
+	cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_min_clk,
+				PPSMC_MSG_SetSclkSoftMin));
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_max_clk,
+				PPSMC_MSG_SetSclkSoftMax));
+
+	return 0;
+}
+
+static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	cz_hwmgr->acp_boot_level = 0xff;
+}
+
+static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	cz_disable_nb_dpm(hwmgr);
+
+	cz_clear_voting_clients(hwmgr);
+	if (cz_stop_dpm(hwmgr))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	cz_program_voting_clients(hwmgr);
+	if (cz_start_dpm(hwmgr))
+		return -EINVAL;
+	cz_program_bootup_state(hwmgr);
+	cz_reset_acp_boot_level(hwmgr);
+
+	return 0;
+}
+
+static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+				struct pp_power_state  *prequest_ps,
+			const struct pp_power_state *pcurrent_ps)
+{
+	struct cz_power_state *cz_ps =
+				cast_PhwCzPowerState(&prequest_ps->hardware);
+
+	const struct cz_power_state *cz_current_ps =
+				cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
+
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct PP_Clocks clocks = {0, 0, 0, 0};
+	bool force_high;
+	uint32_t  num_of_active_displays = 0;
+	struct cgs_display_info info = {0};
+
+	cz_ps->need_dfs_bypass = true;
+
+	cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+
+	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
+				hwmgr->display_config.min_mem_set_clock :
+				cz_hwmgr->sys_info.nbp_memory_clock[1];
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	num_of_active_displays = info.display_count;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
+
+	force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
+			|| (num_of_active_displays >= 3);
+
+	cz_ps->action = cz_current_ps->action;
+
+	if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+		cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+	else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
+		cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
+	else if (!force_high && (cz_ps->action == FORCE_HIGH))
+		cz_ps->action = CANCEL_FORCE_HIGH;
+	else if (force_high && (cz_ps->action != FORCE_HIGH))
+		cz_ps->action = FORCE_HIGH;
+	else
+		cz_ps->action = DO_NOTHING;
+
+	return 0;
+}
+
+static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct cz_hwmgr *data;
+
+	data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+
+	result = cz_initialize_dpm_defaults(hwmgr);
+	if (result != 0) {
+		pr_err("cz_initialize_dpm_defaults failed\n");
+		return result;
+	}
+
+	result = cz_get_system_info_data(hwmgr);
+	if (result != 0) {
+		pr_err("cz_get_system_info_data failed\n");
+		return result;
+	}
+
+	cz_construct_boot_state(hwmgr);
+
+	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  CZ_MAX_HARDWARE_POWERLEVELS;
+
+	return result;
+}
+
+static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr != NULL) {
+		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+
+		kfree(hwmgr->backend);
+		hwmgr->backend = NULL;
+	}
+	return 0;
+}
+
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetSclkSoftMin,
+					cz_get_sclk_level(hwmgr,
+					cz_hwmgr->sclk_dpm.soft_max_clk,
+					PPSMC_MSG_SetSclkSoftMin));
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_max_clk,
+				PPSMC_MSG_SetSclkSoftMax));
+
+	return 0;
+}
+
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
+	cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+	hwmgr->pstate_sclk = table->entries[0].clk;
+	hwmgr->pstate_mclk = 0;
+
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		clock = table->entries[level].clk;
+	else
+		clock = table->entries[table->count - 1].clk;
+
+	cz_hwmgr->sclk_dpm.soft_max_clk = clock;
+	cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_min_clk,
+				PPSMC_MSG_SetSclkSoftMin));
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_max_clk,
+				PPSMC_MSG_SetSclkSoftMax));
+
+	return 0;
+}
+
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSclkSoftMax,
+			cz_get_sclk_level(hwmgr,
+			cz_hwmgr->sclk_dpm.soft_min_clk,
+			PPSMC_MSG_SetSclkSoftMax));
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_min_clk,
+				PPSMC_MSG_SetSclkSoftMin));
+
+	return 0;
+}
+
+static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+				enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = cz_phm_force_dpm_highest(hwmgr);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		ret = cz_phm_force_dpm_lowest(hwmgr);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = cz_phm_unforce_dpm_levels(hwmgr);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+{
+	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
+		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+	return 0;
+}
+
+int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
+{
+	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+		return smum_send_msg_to_smc_with_parameter(
+			hwmgr,
+			PPSMC_MSG_UVDPowerON,
+			PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+	}
+
+	return 0;
+}
+
+int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_uvd_clock_voltage_dependency_table *ptable =
+		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+	if (!bgate) {
+		/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
+		if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+		    hwmgr->en_umd_pstate) {
+			cz_hwmgr->uvd_dpm.hard_min_clk =
+				   ptable->entries[ptable->count - 1].vclk;
+
+			smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetUvdHardMin,
+				cz_get_uvd_level(hwmgr,
+					cz_hwmgr->uvd_dpm.hard_min_clk,
+					PPSMC_MSG_SetUvdHardMin));
+
+			cz_enable_disable_uvd_dpm(hwmgr, true);
+		} else {
+			cz_enable_disable_uvd_dpm(hwmgr, true);
+		}
+	} else {
+		cz_enable_disable_uvd_dpm(hwmgr, false);
+	}
+
+	return 0;
+}
+
+int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_vce_clock_voltage_dependency_table *ptable =
+		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
+	if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+	    hwmgr->en_umd_pstate) {
+		cz_hwmgr->vce_dpm.hard_min_clk =
+				  ptable->entries[ptable->count - 1].ecclk;
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetEclkHardMin,
+			cz_get_eclk_level(hwmgr,
+				cz_hwmgr->vce_dpm.hard_min_clk,
+				PPSMC_MSG_SetEclkHardMin));
+	} else {
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetEclkHardMin, 0);
+		/* disable ECLK DPM 0. Otherwise VCE could hang if
+		 * switching SCLK from DPM 0 to 6/7 */
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetEclkSoftMin, 1);
+	}
+	return 0;
+}
+
+int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
+{
+	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+		return smum_send_msg_to_smc(hwmgr,
+						     PPSMC_MSG_VCEPowerOFF);
+	return 0;
+}
+
+int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
+{
+	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+		return smum_send_msg_to_smc(hwmgr,
+						     PPSMC_MSG_VCEPowerON);
+	return 0;
+}
+
+static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	return cz_hwmgr->sys_info.bootup_uma_clock;
+}
+
+static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct cz_power_state  *cz_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	cz_ps = cast_PhwCzPowerState(&ps->hardware);
+
+	if (low)
+		return cz_ps->levels[0].engineClock;
+	else
+		return cz_ps->levels[cz_ps->level-1].engineClock;
+}
+
+static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *hw_ps)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+
+	cz_ps->level = 1;
+	cz_ps->nbps_flags = 0;
+	cz_ps->bapm_flags = 0;
+	cz_ps->levels[0] = cz_hwmgr->boot_power_level;
+
+	return 0;
+}
+
+static int cz_dpm_get_pp_table_entry_callback(
+						     struct pp_hwmgr *hwmgr,
+					   struct pp_hw_power_state *hw_ps,
+							  unsigned int index,
+						     const void *clock_info)
+{
+	struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+
+	const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
+
+	struct phm_clock_voltage_dependency_table *table =
+				    hwmgr->dyn_state.vddc_dependency_on_sclk;
+	uint8_t clock_info_index = cz_clock_info->index;
+
+	if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
+		clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
+
+	cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
+	cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
+
+	cz_ps->level = index + 1;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		cz_ps->levels[index].dsDividerIndex = 5;
+		cz_ps->levels[index].ssDividerIndex = 5;
+	}
+
+	return 0;
+}
+
+static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	unsigned long ret = 0;
+
+	result = pp_tables_get_num_of_entries(hwmgr, &ret);
+
+	return result ? 0 : ret;
+}
+
+static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+		    unsigned long entry, struct pp_power_state *ps)
+{
+	int result;
+	struct cz_power_state *cz_ps;
+
+	ps->hardware.magic = PhwCz_Magic;
+
+	cz_ps = cast_PhwCzPowerState(&(ps->hardware));
+
+	result = pp_tables_get_entry(hwmgr, entry, ps,
+			cz_dpm_get_pp_table_entry_callback);
+
+	cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+	cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+
+	return result;
+}
+
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+	return sizeof(struct cz_power_state);
+}
+
+static void cz_hw_print_display_cfg(
+	const struct cc6_settings *cc6_settings)
+{
+	PP_DBG_LOG("New Display Configuration:\n");
+
+	PP_DBG_LOG("   cpu_cc6_disable: %d\n",
+			cc6_settings->cpu_cc6_disable);
+	PP_DBG_LOG("   cpu_pstate_disable: %d\n",
+			cc6_settings->cpu_pstate_disable);
+	PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
+			cc6_settings->nb_pstate_switch_disable);
+	PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
+			cc6_settings->cpu_pstate_separation_time);
+}
+
+static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+	uint32_t data = 0;
+
+	if (hw_data->cc6_settings.cc6_setting_changed) {
+
+		hw_data->cc6_settings.cc6_setting_changed = false;
+
+		cz_hw_print_display_cfg(&hw_data->cc6_settings);
+
+		data |= (hw_data->cc6_settings.cpu_pstate_separation_time
+			& PWRMGT_SEPARATION_TIME_MASK)
+			<< PWRMGT_SEPARATION_TIME_SHIFT;
+
+		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
+			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
+
+		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
+			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
+
+		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
+			data);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetDisplaySizePowerParams,
+						data);
+	}
+
+	return 0;
+}
+
+
+static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
+{
+	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+	if (separation_time !=
+	    hw_data->cc6_settings.cpu_pstate_separation_time ||
+	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
+
+		hw_data->cc6_settings.cc6_setting_changed = true;
+
+		hw_data->cc6_settings.cpu_pstate_separation_time =
+			separation_time;
+		hw_data->cc6_settings.cpu_cc6_disable =
+			cc6_disable;
+		hw_data->cc6_settings.cpu_pstate_disable =
+			pstate_disable;
+		hw_data->cc6_settings.nb_pstate_switch_disable =
+			pstate_switch_disable;
+
+	}
+
+	return 0;
+}
+
+static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
+		struct amd_pp_simple_clock_info *info)
+{
+	uint32_t i;
+	const struct phm_clock_voltage_dependency_table *table =
+			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
+	const struct phm_clock_and_voltage_limits *limits =
+			&hwmgr->dyn_state.max_clock_voltage_on_ac;
+
+	info->engine_max_clock = limits->sclk;
+	info->memory_max_clock = limits->mclk;
+
+	for (i = table->count - 1; i > 0; i--) {
+		if (limits->vddc >= table->entries[i].v) {
+			info->level = table->entries[i].clk;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				mask);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSclkSoftMax,
+				mask);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *sclk_table =
+			hwmgr->dyn_state.vddc_dependency_on_sclk;
+	int i, now, size = 0;
+
+	switch (type) {
+	case PP_SCLK:
+		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				ixTARGET_AND_CURRENT_PROFILE_INDEX),
+				TARGET_AND_CURRENT_PROFILE_INDEX,
+				CURR_SCLK_INDEX);
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, sclk_table->entries[i].clk / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				ixTARGET_AND_CURRENT_PROFILE_INDEX),
+				TARGET_AND_CURRENT_PROFILE_INDEX,
+				CURR_MCLK_INDEX);
+
+		for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
+					(CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
+static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	const struct cz_power_state *ps;
+	struct cz_hwmgr *data;
+	uint32_t level_index;
+	uint32_t i;
+
+	if (level == NULL || hwmgr == NULL || state == NULL)
+		return -EINVAL;
+
+	data = (struct cz_hwmgr *)(hwmgr->backend);
+	ps = cast_const_PhwCzPowerState(state);
+
+	level_index = index > ps->level - 1 ? ps->level - 1 : index;
+	level->coreClock = ps->levels[level_index].engineClock;
+
+	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
+		for (i = 1; i < ps->level; i++) {
+			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
+				level->coreClock = ps->levels[i].engineClock;
+				break;
+			}
+		}
+	}
+
+	if (level_index == 0)
+		level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
+	else
+		level->memory_clock = data->sys_info.nbp_memory_clock[0];
+
+	level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
+	level->nonLocalMemoryFreq = 0;
+	level->nonLocalMemoryWidth = 0;
+
+	return 0;
+}
+
+static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+	const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
+
+	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
+	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
+
+	return 0;
+}
+
+static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+						struct amd_pp_clocks *clocks)
+{
+	struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+	int i;
+	struct phm_clock_voltage_dependency_table *table;
+
+	clocks->count = cz_get_max_sclk_level(hwmgr);
+	switch (type) {
+	case amd_pp_disp_clock:
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = data->sys_info.display_clock[i];
+		break;
+	case amd_pp_sys_clock:
+		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = table->entries[i].clk;
+		break;
+	case amd_pp_mem_clock:
+		clocks->count = CZ_NUM_NBPMEMORYCLOCK;
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+	struct phm_clock_voltage_dependency_table *table =
+					hwmgr->dyn_state.vddc_dependency_on_sclk;
+	unsigned long level;
+	const struct phm_clock_and_voltage_limits *limits =
+			&hwmgr->dyn_state.max_clock_voltage_on_ac;
+
+	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
+		return -EINVAL;
+
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		clocks->engine_max_clock = table->entries[level].clk;
+	else
+		clocks->engine_max_clock = table->entries[table->count - 1].clk;
+
+	clocks->memory_max_clock = limits->mclk;
+
+	return 0;
+}
+
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int actual_temp = 0;
+	uint32_t val = cgs_read_ind_register(hwmgr->device,
+					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else
+		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return actual_temp;
+}
+
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+			  void *value, int *size)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	struct phm_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+	struct phm_vce_clock_voltage_dependency_table *vce_table =
+		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+	uint16_t vddnb, vddgfx;
+	int result;
+
+	/* size must be at least 4 bytes for all sensors */
+	if (*size < 4)
+		return -EINVAL;
+	*size = 4;
+
+	switch (idx) {
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		if (sclk_index < NUM_SCLK_LEVELS) {
+			sclk = table->entries[sclk_index].clk;
+			*((uint32_t *)value) = sclk;
+			return 0;
+		}
+		return -EINVAL;
+	case AMDGPU_PP_SENSOR_VDDNB:
+		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+		vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+		*((uint32_t *)value) = vddnb;
+		return 0;
+	case AMDGPU_PP_SENSOR_VDDGFX:
+		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+		vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+		*((uint32_t *)value) = vddgfx;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_VCLK:
+		if (!cz_hwmgr->uvd_power_gated) {
+			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				vclk = uvd_table->entries[uvd_index].vclk;
+				*((uint32_t *)value) = vclk;
+				return 0;
+			}
+		}
+		*((uint32_t *)value) = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_DCLK:
+		if (!cz_hwmgr->uvd_power_gated) {
+			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				dclk = uvd_table->entries[uvd_index].dclk;
+				*((uint32_t *)value) = dclk;
+				return 0;
+			}
+		}
+		*((uint32_t *)value) = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_VCE_ECCLK:
+		if (!cz_hwmgr->vce_power_gated) {
+			if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				ecclk = vce_table->entries[vce_index].ecclk;
+				*((uint32_t *)value) = ecclk;
+				return 0;
+			}
+		}
+		*((uint32_t *)value) = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+		if (0 == result) {
+			activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+			activity_percent = activity_percent > 100 ? 100 : activity_percent;
+		} else {
+			activity_percent = 50;
+		}
+		*((uint32_t *)value) = activity_percent;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_POWER:
+		*((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1;
+		return 0;
+	case AMDGPU_PP_SENSOR_VCE_POWER:
+		*((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*((uint32_t *)value) = cz_thermal_get_temperature(hwmgr);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+					uint32_t virtual_addr_low,
+					uint32_t virtual_addr_hi,
+					uint32_t mc_addr_low,
+					uint32_t mc_addr_hi,
+					uint32_t size)
+{
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DramAddrHiVirtual,
+					mc_addr_hi);
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DramAddrLoVirtual,
+					mc_addr_low);
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DramAddrHiPhysical,
+					virtual_addr_hi);
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DramAddrLoPhysical,
+					virtual_addr_low);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DramBufferSize,
+					size);
+	return 0;
+}
+
+static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+		struct PP_TemperatureRange *thermal_data)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
+
+	thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
+			cz_hwmgr->sys_info.htc_hyst_lmt) *
+			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return 0;
+}
+
+static const struct pp_hwmgr_func cz_hwmgr_funcs = {
+	.backend_init = cz_hwmgr_backend_init,
+	.backend_fini = cz_hwmgr_backend_fini,
+	.apply_state_adjust_rules = cz_apply_state_adjust_rules,
+	.force_dpm_level = cz_dpm_force_dpm_level,
+	.get_power_state_size = cz_get_power_state_size,
+	.powerdown_uvd = cz_dpm_powerdown_uvd,
+	.powergate_uvd = cz_dpm_powergate_uvd,
+	.powergate_vce = cz_dpm_powergate_vce,
+	.get_mclk = cz_dpm_get_mclk,
+	.get_sclk = cz_dpm_get_sclk,
+	.patch_boot_state = cz_dpm_patch_boot_state,
+	.get_pp_table_entry = cz_dpm_get_pp_table_entry,
+	.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
+	.set_cpu_power_state = cz_set_cpu_power_state,
+	.store_cc6_data = cz_store_cc6_data,
+	.force_clock_level = cz_force_clock_level,
+	.print_clock_levels = cz_print_clock_levels,
+	.get_dal_power_level = cz_get_dal_power_level,
+	.get_performance_level = cz_get_performance_level,
+	.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
+	.get_clock_by_type = cz_get_clock_by_type,
+	.get_max_high_clocks = cz_get_max_high_clocks,
+	.read_sensor = cz_read_sensor,
+	.power_off_asic = cz_power_off_asic,
+	.asic_setup = cz_setup_asic_task,
+	.dynamic_state_management_enable = cz_enable_dpm_tasks,
+	.power_state_set = cz_set_power_state_tasks,
+	.dynamic_state_management_disable = cz_disable_dpm_tasks,
+	.notify_cac_buffer_info = cz_notify_cac_buffer_info,
+	.get_thermal_temperature_range = cz_get_thermal_temperature_range,
+};
+
+int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
+{
+	hwmgr->hwmgr_func = &cz_hwmgr_funcs;
+	hwmgr->pptable_func = &pptable_funcs;
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
new file mode 100644
index 0000000..b56720a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _CZ_HWMGR_H_
+#define _CZ_HWMGR_H_
+
+#include "cgs_common.h"
+#include "ppatomctrl.h"
+
+#define CZ_NUM_NBPSTATES               4
+#define CZ_NUM_NBPMEMORYCLOCK          2
+#define MAX_DISPLAY_CLOCK_LEVEL        8
+#define CZ_MAX_HARDWARE_POWERLEVELS    8
+#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0   0x3FFFC102
+#define CZ_MIN_DEEP_SLEEP_SCLK         800
+
+/* Carrizo device IDs */
+#define DEVICE_ID_CZ_9870             0x9870
+#define DEVICE_ID_CZ_9874             0x9874
+#define DEVICE_ID_CZ_9875             0x9875
+#define DEVICE_ID_CZ_9876             0x9876
+#define DEVICE_ID_CZ_9877             0x9877
+
+#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value)                            \
+		cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
+
+struct cz_dpm_entry {
+	uint32_t soft_min_clk;
+	uint32_t hard_min_clk;
+	uint32_t soft_max_clk;
+	uint32_t hard_max_clk;
+};
+
+struct cz_sys_info {
+	uint32_t bootup_uma_clock;
+	uint32_t bootup_engine_clock;
+	uint32_t dentist_vco_freq;
+	uint32_t nb_dpm_enable;
+	uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK];
+	uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
+	uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES];
+	uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
+	uint16_t bootup_nb_voltage_index;
+	uint8_t htc_tmp_lmt;
+	uint8_t htc_hyst_lmt;
+	uint32_t system_config;
+	uint32_t uma_channel_number;
+};
+
+#define MAX_DISPLAYPHY_IDS			0x8
+#define DISPLAYPHY_LANEMASK			0xF
+#define UNKNOWN_TRANSMITTER_PHY_ID		(-1)
+
+#define DISPLAYPHY_PHYID_SHIFT			24
+#define DISPLAYPHY_LANESELECT_SHIFT		16
+
+#define DISPLAYPHY_RX_SELECT			0x1
+#define DISPLAYPHY_TX_SELECT			0x2
+#define DISPLAYPHY_CORE_SELECT			0x4
+
+#define DDI_POWERGATING_ARG(phyID, lanemask, rx, tx, core) \
+		(((uint32_t)(phyID))<<DISPLAYPHY_PHYID_SHIFT | \
+		((uint32_t)(lanemask))<<DISPLAYPHY_LANESELECT_SHIFT | \
+		((rx) ? DISPLAYPHY_RX_SELECT : 0) | \
+		((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
+		((core) ? DISPLAYPHY_CORE_SELECT : 0))
+
+struct cz_display_phy_info_entry {
+	uint8_t phy_present;
+	uint8_t active_lane_mapping;
+	uint8_t display_config_type;
+	uint8_t active_number_of_lanes;
+};
+
+#define CZ_MAX_DISPLAYPHY_IDS			10
+
+struct cz_display_phy_info {
+	bool display_phy_access_initialized;
+	struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS];
+};
+
+struct cz_power_level {
+	uint32_t engineClock;
+	uint8_t vddcIndex;
+	uint8_t dsDividerIndex;
+	uint8_t ssDividerIndex;
+	uint8_t allowGnbSlow;
+	uint8_t forceNBPstate;
+	uint8_t display_wm;
+	uint8_t vce_wm;
+	uint8_t numSIMDToPowerDown;
+	uint8_t hysteresis_up;
+	uint8_t rsv[3];
+};
+
+struct cz_uvd_clocks {
+	uint32_t vclk;
+	uint32_t dclk;
+	uint32_t vclk_low_divider;
+	uint32_t vclk_high_divider;
+	uint32_t dclk_low_divider;
+	uint32_t dclk_high_divider;
+};
+
+enum cz_pstate_previous_action {
+	DO_NOTHING = 1,
+	FORCE_HIGH,
+	CANCEL_FORCE_HIGH
+};
+
+struct pp_disable_nb_ps_flags {
+	union {
+		struct {
+			uint32_t entry : 1;
+			uint32_t display : 1;
+			uint32_t driver: 1;
+			uint32_t vce : 1;
+			uint32_t uvd : 1;
+			uint32_t acp : 1;
+			uint32_t reserved: 26;
+		} bits;
+		uint32_t u32All;
+	};
+};
+
+struct cz_power_state {
+	unsigned int magic;
+	uint32_t level;
+	struct cz_uvd_clocks uvd_clocks;
+	uint32_t evclk;
+	uint32_t ecclk;
+	uint32_t samclk;
+	uint32_t acpclk;
+	bool need_dfs_bypass;
+	uint32_t nbps_flags;
+	uint32_t bapm_flags;
+	uint8_t dpm_0_pg_nb_ps_low;
+	uint8_t dpm_0_pg_nb_ps_high;
+	uint8_t dpm_x_nb_ps_low;
+	uint8_t dpm_x_nb_ps_high;
+	enum cz_pstate_previous_action action;
+	struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS];
+	struct pp_disable_nb_ps_flags disable_nb_ps_flag;
+};
+
+#define DPMFlags_SCLK_Enabled			0x00000001
+#define DPMFlags_UVD_Enabled			0x00000002
+#define DPMFlags_VCE_Enabled			0x00000004
+#define DPMFlags_ACP_Enabled			0x00000008
+#define DPMFlags_ForceHighestValid		0x40000000
+#define DPMFlags_Debug				0x80000000
+
+#define SMU_EnabledFeatureScoreboard_AcpDpmOn   0x00000001 /* bit 0 */
+#define SMU_EnabledFeatureScoreboard_UvdDpmOn   0x00800000 /* bit 23 */
+#define SMU_EnabledFeatureScoreboard_VceDpmOn   0x01000000 /* bit 24 */
+
+struct cc6_settings {
+	bool cc6_setting_changed;
+	bool nb_pstate_switch_disable; /* controls NB PState switch */
+	bool cpu_cc6_disable; /* controls CPU CState switch (on or off) */
+	bool cpu_pstate_disable;
+	uint32_t cpu_pstate_separation_time;
+};
+
+struct cz_hwmgr {
+	uint32_t dpm_interval;
+
+	uint32_t voltage_drop_threshold;
+
+	uint32_t voting_rights_clients;
+
+	uint32_t disable_driver_thermal_policy;
+
+	uint32_t static_screen_threshold;
+
+	uint32_t gfx_power_gating_threshold;
+
+	uint32_t activity_hysteresis;
+	uint32_t bootup_sclk_divider;
+	uint32_t gfx_ramp_step;
+	uint32_t gfx_ramp_delay; /* in micro-seconds */
+
+	uint32_t thermal_auto_throttling_treshold;
+
+	struct cz_sys_info sys_info;
+
+	struct cz_power_level boot_power_level;
+	struct cz_power_state *cz_current_ps;
+	struct cz_power_state *cz_requested_ps;
+
+	uint32_t mgcg_cgtt_local0;
+	uint32_t mgcg_cgtt_local1;
+
+	uint32_t tdr_clock; /* in 10khz unit */
+
+	uint32_t ddi_power_gating_disabled;
+	uint32_t disable_gfx_power_gating_in_uvd;
+	uint32_t disable_nb_ps3_in_battery;
+
+	uint32_t lock_nb_ps_in_uvd_play_back;
+
+	struct cz_display_phy_info display_phy_info;
+	uint32_t vce_slow_sclk_threshold; /* default 200mhz */
+	uint32_t dce_slow_sclk_threshold; /* default 300mhz */
+	uint32_t min_sclk_did;  /* minimum sclk divider */
+
+	bool disp_clk_bypass;
+	bool disp_clk_bypass_pending;
+	uint32_t bapm_enabled;
+	uint32_t clock_slow_down_freq;
+	uint32_t skip_clock_slow_down;
+	uint32_t enable_nb_ps_policy;
+	uint32_t voltage_drop_in_dce_power_gating;
+	uint32_t uvd_dpm_interval;
+	uint32_t override_dynamic_mgpg;
+	uint32_t lclk_deep_enabled;
+
+	uint32_t uvd_performance;
+
+	bool video_start;
+	bool battery_state;
+	uint32_t lowest_valid;
+	uint32_t highest_valid;
+	uint32_t high_voltage_threshold;
+	uint32_t is_nb_dpm_enabled;
+	struct cc6_settings cc6_settings;
+	uint32_t is_voltage_island_enabled;
+
+	bool pgacpinit;
+
+	uint8_t disp_config;
+
+	/* PowerTune */
+	uint32_t power_containment_features;
+	bool cac_enabled;
+	bool disable_uvd_power_tune_feature;
+	bool enable_ba_pm_feature;
+	bool enable_tdc_limit_feature;
+
+	uint32_t sram_end;
+	uint32_t dpm_table_start;
+	uint32_t soft_regs_start;
+
+	uint8_t uvd_level_count;
+	uint8_t vce_level_count;
+
+	uint8_t acp_level_count;
+	uint8_t samu_level_count;
+	uint32_t fps_high_threshold;
+	uint32_t fps_low_threshold;
+
+	uint32_t dpm_flags;
+	struct cz_dpm_entry sclk_dpm;
+	struct cz_dpm_entry uvd_dpm;
+	struct cz_dpm_entry vce_dpm;
+	struct cz_dpm_entry acp_dpm;
+
+	uint8_t uvd_boot_level;
+	uint8_t vce_boot_level;
+	uint8_t acp_boot_level;
+	uint8_t samu_boot_level;
+	uint8_t uvd_interval;
+	uint8_t vce_interval;
+	uint8_t acp_interval;
+	uint8_t samu_interval;
+
+	uint8_t graphics_interval;
+	uint8_t graphics_therm_throttle_enable;
+	uint8_t graphics_voltage_change_enable;
+
+	uint8_t graphics_clk_slow_enable;
+	uint8_t graphics_clk_slow_divider;
+
+	uint32_t display_cac;
+	uint32_t low_sclk_interrupt_threshold;
+
+	uint32_t dram_log_addr_h;
+	uint32_t dram_log_addr_l;
+	uint32_t dram_log_phy_addr_h;
+	uint32_t dram_log_phy_addr_l;
+	uint32_t dram_log_buff_size;
+
+	bool uvd_power_gated;
+	bool vce_power_gated;
+	bool samu_power_gated;
+	bool acp_power_gated;
+	bool acp_power_up_no_dsp;
+	uint32_t active_process_mask;
+
+	uint32_t max_sclk_level;
+	uint32_t num_of_clk_entries;
+};
+
+struct pp_hwmgr;
+
+int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
+int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
+int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
+int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
+#endif /* _CZ_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 65e2726..8f3f2cc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -23,7 +23,7 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
 	  polaris10_smumgr.o iceland_smumgr.o \
 	  smu7_smumgr.o smu9_0_smumgr.o smu10_smumgr.o ci_smumgr.o
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
deleted file mode 100644
index 957739a..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include "cgs_common.h"
-#include "smu/smu_8_0_d.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "smu8.h"
-#include "smu8_fusion.h"
-#include "cz_smumgr.h"
-#include "cz_ppsmc.h"
-#include "smu_ucode_xfer_cz.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "smumgr.h"
-
-#define SIZE_ALIGN_32(x)    (((x) + 31) / 32 * 32)
-
-static const enum cz_scratch_entry firmware_list[] = {
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
-};
-
-static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	return cgs_read_register(hwmgr->device,
-					mmSMU_MP1_SRBM2P_ARG_0);
-}
-
-static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-	int result = 0;
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
-					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-	if (result != 0) {
-		pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
-		return result;
-	}
-
-	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
-	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
-
-	return 0;
-}
-
-/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-	int result = 0;
-
-	result = cz_send_msg_to_smc_async(hwmgr, msg);
-	if (result != 0)
-		return result;
-
-	return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
-					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-}
-
-static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
-				     uint32_t smc_address, uint32_t limit)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	if (0 != (3 & smc_address)) {
-		pr_err("SMC address must be 4 byte aligned\n");
-		return -EINVAL;
-	}
-
-	if (limit <= (smc_address + 3)) {
-		pr_err("SMC address beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
-				SMN_MP1_SRAM_START_ADDR + smc_address);
-
-	return 0;
-}
-
-static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
-		uint32_t smc_address, uint32_t value, uint32_t limit)
-{
-	int result;
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
-	if (!result)
-		cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
-
-	return result;
-}
-
-static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-					  uint16_t msg, uint32_t parameter)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
-	return cz_send_msg_to_smc(hwmgr, msg);
-}
-
-static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
-				   uint32_t firmware)
-{
-	int i;
-	uint32_t index = SMN_MP1_SRAM_START_ADDR +
-			 SMU8_FIRMWARE_HEADER_LOCATION +
-			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
-
-	for (i = 0; i < hwmgr->usec_timeout; i++) {
-		if (firmware ==
-			(cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
-			break;
-		udelay(1);
-	}
-
-	if (i >= hwmgr->usec_timeout) {
-		pr_err("SMU check loaded firmware failed.\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
-{
-	uint32_t reg_data;
-	uint32_t tmp;
-	int ret = 0;
-	struct cgs_firmware_info info = {0};
-	struct cz_smumgr *cz_smu;
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	ret = cgs_get_firmware_info(hwmgr->device,
-						CGS_UCODE_ID_CP_MEC, &info);
-
-	if (ret)
-		return -EINVAL;
-
-	/* Disable MEC parsing/prefetching */
-	tmp = cgs_read_register(hwmgr->device,
-					mmCP_MEC_CNTL);
-	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
-	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
-	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
-
-	tmp = cgs_read_register(hwmgr->device,
-					mmCP_CPC_IC_BASE_CNTL);
-
-	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
-	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
-	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
-	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
-	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
-
-	reg_data = lower_32_bits(info.mc_addr) &
-			PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
-	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
-
-	reg_data = upper_32_bits(info.mc_addr) &
-			PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
-	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
-
-	return 0;
-}
-
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
-			enum cz_scratch_entry firmware_enum)
-{
-	uint8_t ret = 0;
-
-	switch (firmware_enum) {
-	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
-		ret = UCODE_ID_SDMA0;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
-		if (hwmgr->chip_id == CHIP_STONEY)
-			ret = UCODE_ID_SDMA0;
-		else
-			ret = UCODE_ID_SDMA1;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
-		ret = UCODE_ID_CP_CE;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
-		ret = UCODE_ID_CP_PFP;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
-		ret = UCODE_ID_CP_ME;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
-		ret = UCODE_ID_CP_MEC_JT1;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
-		if (hwmgr->chip_id == CHIP_STONEY)
-			ret = UCODE_ID_CP_MEC_JT1;
-		else
-			ret = UCODE_ID_CP_MEC_JT2;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
-		ret = UCODE_ID_GMCON_RENG;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
-		ret = UCODE_ID_RLC_G;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
-		ret = UCODE_ID_RLC_SCRATCH;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
-		ret = UCODE_ID_RLC_SRM_ARAM;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
-		ret = UCODE_ID_RLC_SRM_DRAM;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
-		ret = UCODE_ID_DMCU_ERAM;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
-		ret = UCODE_ID_DMCU_IRAM;
-		break;
-	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
-		ret = TASK_ARG_INIT_MM_PWR_LOG;
-		break;
-	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
-	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
-	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
-	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
-	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
-	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
-		ret = TASK_ARG_REG_MMIO;
-		break;
-	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
-		ret = TASK_ARG_INIT_CLK_TABLE;
-		break;
-	}
-
-	return ret;
-}
-
-static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-	switch (fw_type) {
-	case UCODE_ID_SDMA0:
-		result = CGS_UCODE_ID_SDMA0;
-		break;
-	case UCODE_ID_SDMA1:
-		result = CGS_UCODE_ID_SDMA1;
-		break;
-	case UCODE_ID_CP_CE:
-		result = CGS_UCODE_ID_CP_CE;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = CGS_UCODE_ID_CP_PFP;
-		break;
-	case UCODE_ID_CP_ME:
-		result = CGS_UCODE_ID_CP_ME;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-		result = CGS_UCODE_ID_CP_MEC_JT1;
-		break;
-	case UCODE_ID_CP_MEC_JT2:
-		result = CGS_UCODE_ID_CP_MEC_JT2;
-		break;
-	case UCODE_ID_RLC_G:
-		result = CGS_UCODE_ID_RLC_G;
-		break;
-	default:
-		break;
-	}
-
-	return result;
-}
-
-static int cz_smu_populate_single_scratch_task(
-			struct pp_hwmgr *hwmgr,
-			enum cz_scratch_entry fw_enum,
-			uint8_t type, bool is_last)
-{
-	uint8_t i;
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
-	task->type = type;
-	task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
-	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
-	for (i = 0; i < cz_smu->scratch_buffer_length; i++)
-		if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
-			break;
-
-	if (i >= cz_smu->scratch_buffer_length) {
-		pr_err("Invalid Firmware Type\n");
-		return -EINVAL;
-	}
-
-	task->addr.low = lower_32_bits(cz_smu->scratch_buffer[i].mc_addr);
-	task->addr.high = upper_32_bits(cz_smu->scratch_buffer[i].mc_addr);
-	task->size_bytes = cz_smu->scratch_buffer[i].data_size;
-
-	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
-		struct cz_ih_meta_data *pIHReg_restore =
-		     (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
-		pIHReg_restore->command =
-			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
-	}
-
-	return 0;
-}
-
-static int cz_smu_populate_single_ucode_load_task(
-					struct pp_hwmgr *hwmgr,
-					enum cz_scratch_entry fw_enum,
-					bool is_last)
-{
-	uint8_t i;
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
-	task->type = TASK_TYPE_UCODE_LOAD;
-	task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
-	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
-	for (i = 0; i < cz_smu->driver_buffer_length; i++)
-		if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
-			break;
-
-	if (i >= cz_smu->driver_buffer_length) {
-		pr_err("Invalid Firmware Type\n");
-		return -EINVAL;
-	}
-
-	task->addr.low = lower_32_bits(cz_smu->driver_buffer[i].mc_addr);
-	task->addr.high = upper_32_bits(cz_smu->driver_buffer[i].mc_addr);
-	task->size_bytes = cz_smu->driver_buffer[i].data_size;
-
-	return 0;
-}
-
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
-	cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
-				TASK_TYPE_UCODE_SAVE, true);
-
-	return 0;
-}
-
-static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
-{
-	int i;
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
-	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
-		toc->JobList[i] = (uint8_t)IGNORE_JOB;
-
-	return 0;
-}
-
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
-	toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
-	cz_smu_populate_single_scratch_task(hwmgr,
-				    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
-				    TASK_TYPE_UCODE_SAVE, false);
-
-	cz_smu_populate_single_scratch_task(hwmgr,
-				    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
-				    TASK_TYPE_UCODE_SAVE, true);
-
-	return 0;
-}
-
-
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
-	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
-
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-
-	if (hwmgr->chip_id == CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	else
-		cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
-
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
-
-	/* populate scratch */
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
-				TASK_TYPE_UCODE_LOAD, false);
-
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
-				TASK_TYPE_UCODE_LOAD, false);
-
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
-				TASK_TYPE_UCODE_LOAD, true);
-
-	return 0;
-}
-
-static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
-	cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
-
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
-				TASK_TYPE_INITIALIZE, true);
-	return 0;
-}
-
-static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
-	cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
-
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-	if (hwmgr->chip_id != CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	if (hwmgr->chip_id != CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
-	cz_smu_populate_single_ucode_load_task(hwmgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
-
-	return 0;
-}
-
-static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
-	cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
-
-	cz_smu_populate_single_scratch_task(hwmgr,
-				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
-				TASK_TYPE_INITIALIZE, true);
-
-	return 0;
-}
-
-static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
-	cz_smu->toc_entry_used_count = 0;
-	cz_smu_initialize_toc_empty_job_list(hwmgr);
-	cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
-	cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
-	cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
-	cz_smu_construct_toc_for_power_profiling(hwmgr);
-	cz_smu_construct_toc_for_bootup(hwmgr);
-	cz_smu_construct_toc_for_clock_table(hwmgr);
-
-	return 0;
-}
-
-static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	uint32_t firmware_type;
-	uint32_t i;
-	int ret;
-	enum cgs_ucode_id ucode_id;
-	struct cgs_firmware_info info = {0};
-
-	cz_smu->driver_buffer_length = 0;
-
-	for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
-
-		firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
-					firmware_list[i]);
-
-		ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
-
-		ret = cgs_get_firmware_info(hwmgr->device,
-							ucode_id, &info);
-
-		if (ret == 0) {
-			cz_smu->driver_buffer[i].mc_addr = info.mc_addr;
-
-			cz_smu->driver_buffer[i].data_size = info.image_size;
-
-			cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
-			cz_smu->driver_buffer_length++;
-		}
-	}
-
-	return 0;
-}
-
-static int cz_smu_populate_single_scratch_entry(
-				struct pp_hwmgr *hwmgr,
-				enum cz_scratch_entry scratch_type,
-				uint32_t ulsize_byte,
-				struct cz_buffer_entry *entry)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
-
-	entry->data_size = ulsize_byte;
-	entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
-				cz_smu->smu_buffer_used_bytes;
-	entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes;
-	entry->firmware_ID = scratch_type;
-
-	cz_smu->smu_buffer_used_bytes += ulsize_aligned;
-
-	return 0;
-}
-
-static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	unsigned long i;
-
-	for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
-		if (cz_smu->scratch_buffer[i].firmware_ID
-			== CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
-			break;
-	}
-
-	*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetClkTableAddrHi,
-				upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetClkTableAddrLo,
-				lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
-	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-				cz_smu->toc_entry_clock_table);
-
-	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
-
-	return 0;
-}
-
-static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	unsigned long i;
-
-	for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
-		if (cz_smu->scratch_buffer[i].firmware_ID
-				== CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
-			break;
-	}
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetClkTableAddrHi,
-				upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetClkTableAddrLo,
-				lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
-	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-				cz_smu->toc_entry_clock_table);
-
-	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
-
-	return 0;
-}
-
-static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
-	uint32_t smc_address;
-
-	if (!hwmgr->reload_fw) {
-		pr_info("skip reloading...\n");
-		return 0;
-	}
-
-	cz_smu_populate_firmware_entries(hwmgr);
-
-	cz_smu_construct_toc(hwmgr);
-
-	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
-		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
-	cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DriverDramAddrHi,
-					upper_32_bits(cz_smu->toc_buffer.mc_addr));
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_DriverDramAddrLo,
-					lower_32_bits(cz_smu->toc_buffer.mc_addr));
-
-	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
-
-	cz_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_ExecuteJob,
-					cz_smu->toc_entry_aram);
-	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
-				cz_smu->toc_entry_power_profiling_index);
-
-	return cz_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_ExecuteJob,
-					cz_smu->toc_entry_initialize_index);
-}
-
-static int cz_start_smu(struct pp_hwmgr *hwmgr)
-{
-	int ret = 0;
-	uint32_t fw_to_check = 0;
-	struct cgs_firmware_info info = {0};
-	uint32_t index = SMN_MP1_SRAM_START_ADDR +
-			 SMU8_FIRMWARE_HEADER_LOCATION +
-			 offsetof(struct SMU8_Firmware_Header, Version);
-
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
-	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
-	info.version = hwmgr->smu_version >> 8;
-	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
-
-	fw_to_check = UCODE_ID_RLC_G_MASK |
-			UCODE_ID_SDMA0_MASK |
-			UCODE_ID_SDMA1_MASK |
-			UCODE_ID_CP_CE_MASK |
-			UCODE_ID_CP_ME_MASK |
-			UCODE_ID_CP_PFP_MASK |
-			UCODE_ID_CP_MEC_JT1_MASK |
-			UCODE_ID_CP_MEC_JT2_MASK;
-
-	if (hwmgr->chip_id == CHIP_STONEY)
-		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
-	ret = cz_request_smu_load_fw(hwmgr);
-	if (ret)
-		pr_err("SMU firmware load failed\n");
-
-	cz_check_fw_load_finish(hwmgr, fw_to_check);
-
-	ret = cz_load_mec_firmware(hwmgr);
-	if (ret)
-		pr_err("Mec Firmware load failed\n");
-
-	return ret;
-}
-
-static int cz_smu_init(struct pp_hwmgr *hwmgr)
-{
-	int ret = 0;
-	struct cz_smumgr *cz_smu;
-
-	cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
-	if (cz_smu == NULL)
-		return -ENOMEM;
-
-	hwmgr->smu_backend = cz_smu;
-
-	cz_smu->toc_buffer.data_size = 4096;
-	cz_smu->smu_buffer.data_size =
-		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
-		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
-		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
-		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
-		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
-
-	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-				cz_smu->toc_buffer.data_size,
-				PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM,
-				&cz_smu->toc_buffer.handle,
-				&cz_smu->toc_buffer.mc_addr,
-				&cz_smu->toc_buffer.kaddr);
-	if (ret)
-		return -EINVAL;
-
-	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-				cz_smu->smu_buffer.data_size,
-				PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM,
-				&cz_smu->smu_buffer.handle,
-				&cz_smu->smu_buffer.mc_addr,
-				&cz_smu->smu_buffer.kaddr);
-	if (ret) {
-		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
-					&cz_smu->toc_buffer.mc_addr,
-					&cz_smu->toc_buffer.kaddr);
-		return -EINVAL;
-	}
-
-	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
-		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
-		UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
-		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
-		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
-	}
-
-	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
-		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
-		UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
-		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
-		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
-	}
-	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
-		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
-		UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
-		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
-		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
-	}
-
-	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
-		CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
-		sizeof(struct SMU8_MultimediaPowerLogData),
-		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
-		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
-	}
-
-	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
-		CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
-		sizeof(struct SMU8_Fusion_ClkTable),
-		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
-		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
-	}
-
-	return 0;
-}
-
-static int cz_smu_fini(struct pp_hwmgr *hwmgr)
-{
-	struct cz_smumgr *cz_smu;
-
-	if (hwmgr == NULL || hwmgr->device == NULL)
-		return -EINVAL;
-
-	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	if (cz_smu) {
-		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
-					&cz_smu->toc_buffer.mc_addr,
-					&cz_smu->toc_buffer.kaddr);
-		amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
-					&cz_smu->smu_buffer.mc_addr,
-					&cz_smu->smu_buffer.kaddr);
-		kfree(cz_smu);
-	}
-
-	return 0;
-}
-
-static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
-				unsigned long check_feature)
-{
-	int result;
-	unsigned long features;
-
-	result = cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
-	if (result == 0) {
-		features = smum_get_argument(hwmgr);
-		if (features & check_feature)
-			return true;
-	}
-
-	return false;
-}
-
-static bool cz_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
-		return true;
-	return false;
-}
-
-const struct pp_smumgr_func cz_smu_funcs = {
-	.smu_init = cz_smu_init,
-	.smu_fini = cz_smu_fini,
-	.start_smu = cz_start_smu,
-	.check_fw_load_finish = cz_check_fw_load_finish,
-	.request_smu_load_fw = NULL,
-	.request_smu_load_specific_fw = NULL,
-	.get_argument = cz_smum_get_argument,
-	.send_msg_to_smc = cz_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
-	.download_pptable_settings = cz_download_pptable_settings,
-	.upload_pptable_settings = cz_upload_pptable_settings,
-	.is_dpm_running = cz_is_dpm_running,
-};
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
deleted file mode 100644
index c13ab83..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _CZ_SMUMGR_H_
-#define _CZ_SMUMGR_H_
-
-
-#define MAX_NUM_FIRMWARE                        8
-#define MAX_NUM_SCRATCH                         11
-#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING      1024
-#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING    2048
-#define CZ_SCRATCH_SIZE_SDMA_METADATA           1024
-#define CZ_SCRATCH_SIZE_IH                      ((2*256+1)*4)
-
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn    0x00200000
-
-enum cz_scratch_entry {
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
-	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
-	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
-	CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
-	CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
-	CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
-	CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
-	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
-	CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
-	CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
-};
-
-struct cz_buffer_entry {
-	uint32_t data_size;
-	uint64_t mc_addr;
-	void *kaddr;
-	enum cz_scratch_entry firmware_ID;
-	struct amdgpu_bo *handle; /* as bo handle used when release bo */
-};
-
-struct cz_register_index_data_pair {
-	uint32_t offset;
-	uint32_t value;
-};
-
-struct cz_ih_meta_data {
-	uint32_t command;
-	struct cz_register_index_data_pair register_index_value_pair[1];
-};
-
-struct cz_smumgr {
-	uint8_t driver_buffer_length;
-	uint8_t scratch_buffer_length;
-	uint16_t toc_entry_used_count;
-	uint16_t toc_entry_initialize_index;
-	uint16_t toc_entry_power_profiling_index;
-	uint16_t toc_entry_aram;
-	uint16_t toc_entry_ih_register_restore_task_index;
-	uint16_t toc_entry_clock_table;
-	uint16_t ih_register_restore_task_size;
-	uint16_t smu_buffer_used_bytes;
-
-	struct cz_buffer_entry toc_buffer;
-	struct cz_buffer_entry smu_buffer;
-	struct cz_buffer_entry firmware_buffer;
-	struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
-	struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
-	struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
new file mode 100644
index 0000000..3cca257
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -0,0 +1,883 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "cgs_common.h"
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "smu8.h"
+#include "smu8_fusion.h"
+#include "smu8_smumgr.h"
+#include "cz_ppsmc.h"
+#include "smu_ucode_xfer_cz.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "smumgr.h"
+
+#define SIZE_ALIGN_32(x)    (((x) + 31) / 32 * 32)
+
+static const enum cz_scratch_entry firmware_list[] = {
+	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+};
+
+static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	return cgs_read_register(hwmgr->device,
+					mmSMU_MP1_SRBM2P_ARG_0);
+}
+
+static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+	int result = 0;
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+	if (result != 0) {
+		pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
+		return result;
+	}
+
+	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+
+	return 0;
+}
+
+/* Send a message to the SMC, and wait for its response.*/
+static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+	int result = 0;
+
+	result = cz_send_msg_to_smc_async(hwmgr, msg);
+	if (result != 0)
+		return result;
+
+	return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+}
+
+static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+				     uint32_t smc_address, uint32_t limit)
+{
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	if (0 != (3 & smc_address)) {
+		pr_err("SMC address must be 4 byte aligned\n");
+		return -EINVAL;
+	}
+
+	if (limit <= (smc_address + 3)) {
+		pr_err("SMC address beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
+				SMN_MP1_SRAM_START_ADDR + smc_address);
+
+	return 0;
+}
+
+static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
+		uint32_t smc_address, uint32_t value, uint32_t limit)
+{
+	int result;
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
+	if (!result)
+		cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
+
+	return result;
+}
+
+static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+					  uint16_t msg, uint32_t parameter)
+{
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+
+	return cz_send_msg_to_smc(hwmgr, msg);
+}
+
+static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
+				   uint32_t firmware)
+{
+	int i;
+	uint32_t index = SMN_MP1_SRAM_START_ADDR +
+			 SMU8_FIRMWARE_HEADER_LOCATION +
+			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+
+	for (i = 0; i < hwmgr->usec_timeout; i++) {
+		if (firmware ==
+			(cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
+			break;
+		udelay(1);
+	}
+
+	if (i >= hwmgr->usec_timeout) {
+		pr_err("SMU check loaded firmware failed.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg_data;
+	uint32_t tmp;
+	int ret = 0;
+	struct cgs_firmware_info info = {0};
+	struct cz_smumgr *cz_smu;
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	ret = cgs_get_firmware_info(hwmgr->device,
+						CGS_UCODE_ID_CP_MEC, &info);
+
+	if (ret)
+		return -EINVAL;
+
+	/* Disable MEC parsing/prefetching */
+	tmp = cgs_read_register(hwmgr->device,
+					mmCP_MEC_CNTL);
+	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
+
+	tmp = cgs_read_register(hwmgr->device,
+					mmCP_CPC_IC_BASE_CNTL);
+
+	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+
+	reg_data = lower_32_bits(info.mc_addr) &
+			PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+
+	reg_data = upper_32_bits(info.mc_addr) &
+			PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+
+	return 0;
+}
+
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
+			enum cz_scratch_entry firmware_enum)
+{
+	uint8_t ret = 0;
+
+	switch (firmware_enum) {
+	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
+		ret = UCODE_ID_SDMA0;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
+		if (hwmgr->chip_id == CHIP_STONEY)
+			ret = UCODE_ID_SDMA0;
+		else
+			ret = UCODE_ID_SDMA1;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
+		ret = UCODE_ID_CP_CE;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
+		ret = UCODE_ID_CP_PFP;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
+		ret = UCODE_ID_CP_ME;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
+		ret = UCODE_ID_CP_MEC_JT1;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
+		if (hwmgr->chip_id == CHIP_STONEY)
+			ret = UCODE_ID_CP_MEC_JT1;
+		else
+			ret = UCODE_ID_CP_MEC_JT2;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
+		ret = UCODE_ID_GMCON_RENG;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
+		ret = UCODE_ID_RLC_G;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
+		ret = UCODE_ID_RLC_SCRATCH;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
+		ret = UCODE_ID_RLC_SRM_ARAM;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
+		ret = UCODE_ID_RLC_SRM_DRAM;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
+		ret = UCODE_ID_DMCU_ERAM;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
+		ret = UCODE_ID_DMCU_IRAM;
+		break;
+	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
+		ret = TASK_ARG_INIT_MM_PWR_LOG;
+		break;
+	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
+	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
+	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
+	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
+	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
+	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
+		ret = TASK_ARG_REG_MMIO;
+		break;
+	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
+		ret = TASK_ARG_INIT_CLK_TABLE;
+		break;
+	}
+
+	return ret;
+}
+
+static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case UCODE_ID_SDMA0:
+		result = CGS_UCODE_ID_SDMA0;
+		break;
+	case UCODE_ID_SDMA1:
+		result = CGS_UCODE_ID_SDMA1;
+		break;
+	case UCODE_ID_CP_CE:
+		result = CGS_UCODE_ID_CP_CE;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = CGS_UCODE_ID_CP_PFP;
+		break;
+	case UCODE_ID_CP_ME:
+		result = CGS_UCODE_ID_CP_ME;
+		break;
+	case UCODE_ID_CP_MEC_JT1:
+		result = CGS_UCODE_ID_CP_MEC_JT1;
+		break;
+	case UCODE_ID_CP_MEC_JT2:
+		result = CGS_UCODE_ID_CP_MEC_JT2;
+		break;
+	case UCODE_ID_RLC_G:
+		result = CGS_UCODE_ID_RLC_G;
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+static int cz_smu_populate_single_scratch_task(
+			struct pp_hwmgr *hwmgr,
+			enum cz_scratch_entry fw_enum,
+			uint8_t type, bool is_last)
+{
+	uint8_t i;
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
+	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
+
+	task->type = type;
+	task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
+
+	for (i = 0; i < cz_smu->scratch_buffer_length; i++)
+		if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
+			break;
+
+	if (i >= cz_smu->scratch_buffer_length) {
+		pr_err("Invalid Firmware Type\n");
+		return -EINVAL;
+	}
+
+	task->addr.low = lower_32_bits(cz_smu->scratch_buffer[i].mc_addr);
+	task->addr.high = upper_32_bits(cz_smu->scratch_buffer[i].mc_addr);
+	task->size_bytes = cz_smu->scratch_buffer[i].data_size;
+
+	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
+		struct cz_ih_meta_data *pIHReg_restore =
+		     (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
+		pIHReg_restore->command =
+			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
+	}
+
+	return 0;
+}
+
+static int cz_smu_populate_single_ucode_load_task(
+					struct pp_hwmgr *hwmgr,
+					enum cz_scratch_entry fw_enum,
+					bool is_last)
+{
+	uint8_t i;
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
+	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
+
+	task->type = TASK_TYPE_UCODE_LOAD;
+	task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
+
+	for (i = 0; i < cz_smu->driver_buffer_length; i++)
+		if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
+			break;
+
+	if (i >= cz_smu->driver_buffer_length) {
+		pr_err("Invalid Firmware Type\n");
+		return -EINVAL;
+	}
+
+	task->addr.low = lower_32_bits(cz_smu->driver_buffer[i].mc_addr);
+	task->addr.high = upper_32_bits(cz_smu->driver_buffer[i].mc_addr);
+	task->size_bytes = cz_smu->driver_buffer[i].data_size;
+
+	return 0;
+}
+
+static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+
+	cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+				TASK_TYPE_UCODE_SAVE, true);
+
+	return 0;
+}
+
+static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
+
+	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
+		toc->JobList[i] = (uint8_t)IGNORE_JOB;
+
+	return 0;
+}
+
+static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
+
+	toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
+	cz_smu_populate_single_scratch_task(hwmgr,
+				    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+				    TASK_TYPE_UCODE_SAVE, false);
+
+	cz_smu_populate_single_scratch_task(hwmgr,
+				    CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+				    TASK_TYPE_UCODE_SAVE, true);
+
+	return 0;
+}
+
+
+static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
+
+	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
+
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+
+	if (hwmgr->chip_id == CHIP_STONEY)
+		cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+	else
+		cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
+
+	/* populate scratch */
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+				TASK_TYPE_UCODE_LOAD, false);
+
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+				TASK_TYPE_UCODE_LOAD, false);
+
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+				TASK_TYPE_UCODE_LOAD, true);
+
+	return 0;
+}
+
+static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+
+	cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
+
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+				TASK_TYPE_INITIALIZE, true);
+	return 0;
+}
+
+static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+
+	cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
+
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+	if (hwmgr->chip_id != CHIP_STONEY)
+		cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+	if (hwmgr->chip_id != CHIP_STONEY)
+		cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+	cz_smu_populate_single_ucode_load_task(hwmgr,
+				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
+
+	return 0;
+}
+
+static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+
+	cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
+
+	cz_smu_populate_single_scratch_task(hwmgr,
+				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+				TASK_TYPE_INITIALIZE, true);
+
+	return 0;
+}
+
+static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+
+	cz_smu->toc_entry_used_count = 0;
+	cz_smu_initialize_toc_empty_job_list(hwmgr);
+	cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
+	cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
+	cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
+	cz_smu_construct_toc_for_power_profiling(hwmgr);
+	cz_smu_construct_toc_for_bootup(hwmgr);
+	cz_smu_construct_toc_for_clock_table(hwmgr);
+
+	return 0;
+}
+
+static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	uint32_t firmware_type;
+	uint32_t i;
+	int ret;
+	enum cgs_ucode_id ucode_id;
+	struct cgs_firmware_info info = {0};
+
+	cz_smu->driver_buffer_length = 0;
+
+	for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
+
+		firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
+					firmware_list[i]);
+
+		ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
+
+		ret = cgs_get_firmware_info(hwmgr->device,
+							ucode_id, &info);
+
+		if (ret == 0) {
+			cz_smu->driver_buffer[i].mc_addr = info.mc_addr;
+
+			cz_smu->driver_buffer[i].data_size = info.image_size;
+
+			cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
+			cz_smu->driver_buffer_length++;
+		}
+	}
+
+	return 0;
+}
+
+static int cz_smu_populate_single_scratch_entry(
+				struct pp_hwmgr *hwmgr,
+				enum cz_scratch_entry scratch_type,
+				uint32_t ulsize_byte,
+				struct cz_buffer_entry *entry)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
+
+	entry->data_size = ulsize_byte;
+	entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
+				cz_smu->smu_buffer_used_bytes;
+	entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes;
+	entry->firmware_ID = scratch_type;
+
+	cz_smu->smu_buffer_used_bytes += ulsize_aligned;
+
+	return 0;
+}
+
+static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	unsigned long i;
+
+	for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
+		if (cz_smu->scratch_buffer[i].firmware_ID
+			== CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+			break;
+	}
+
+	*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetClkTableAddrHi,
+				upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetClkTableAddrLo,
+				lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
+
+	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+				cz_smu->toc_entry_clock_table);
+
+	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+
+	return 0;
+}
+
+static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	unsigned long i;
+
+	for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
+		if (cz_smu->scratch_buffer[i].firmware_ID
+				== CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+			break;
+	}
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetClkTableAddrHi,
+				upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetClkTableAddrLo,
+				lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
+
+	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+				cz_smu->toc_entry_clock_table);
+
+	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+
+	return 0;
+}
+
+static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
+	uint32_t smc_address;
+
+	if (!hwmgr->reload_fw) {
+		pr_info("skip reloading...\n");
+		return 0;
+	}
+
+	cz_smu_populate_firmware_entries(hwmgr);
+
+	cz_smu_construct_toc(hwmgr);
+
+	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
+		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+	cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DriverDramAddrHi,
+					upper_32_bits(cz_smu->toc_buffer.mc_addr));
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_DriverDramAddrLo,
+					lower_32_bits(cz_smu->toc_buffer.mc_addr));
+
+	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+
+	cz_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_ExecuteJob,
+					cz_smu->toc_entry_aram);
+	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+				cz_smu->toc_entry_power_profiling_index);
+
+	return cz_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_ExecuteJob,
+					cz_smu->toc_entry_initialize_index);
+}
+
+static int cz_start_smu(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+	uint32_t fw_to_check = 0;
+	struct cgs_firmware_info info = {0};
+	uint32_t index = SMN_MP1_SRAM_START_ADDR +
+			 SMU8_FIRMWARE_HEADER_LOCATION +
+			 offsetof(struct SMU8_Firmware_Header, Version);
+
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+	info.version = hwmgr->smu_version >> 8;
+	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+	fw_to_check = UCODE_ID_RLC_G_MASK |
+			UCODE_ID_SDMA0_MASK |
+			UCODE_ID_SDMA1_MASK |
+			UCODE_ID_CP_CE_MASK |
+			UCODE_ID_CP_ME_MASK |
+			UCODE_ID_CP_PFP_MASK |
+			UCODE_ID_CP_MEC_JT1_MASK |
+			UCODE_ID_CP_MEC_JT2_MASK;
+
+	if (hwmgr->chip_id == CHIP_STONEY)
+		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+	ret = cz_request_smu_load_fw(hwmgr);
+	if (ret)
+		pr_err("SMU firmware load failed\n");
+
+	cz_check_fw_load_finish(hwmgr, fw_to_check);
+
+	ret = cz_load_mec_firmware(hwmgr);
+	if (ret)
+		pr_err("Mec Firmware load failed\n");
+
+	return ret;
+}
+
+static int cz_smu_init(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+	struct cz_smumgr *cz_smu;
+
+	cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
+	if (cz_smu == NULL)
+		return -ENOMEM;
+
+	hwmgr->smu_backend = cz_smu;
+
+	cz_smu->toc_buffer.data_size = 4096;
+	cz_smu->smu_buffer.data_size =
+		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
+		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
+		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
+		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
+		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
+
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+				cz_smu->toc_buffer.data_size,
+				PAGE_SIZE,
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->toc_buffer.handle,
+				&cz_smu->toc_buffer.mc_addr,
+				&cz_smu->toc_buffer.kaddr);
+	if (ret)
+		return -EINVAL;
+
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+				cz_smu->smu_buffer.data_size,
+				PAGE_SIZE,
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->smu_buffer.handle,
+				&cz_smu->smu_buffer.mc_addr,
+				&cz_smu->smu_buffer.kaddr);
+	if (ret) {
+		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+					&cz_smu->toc_buffer.mc_addr,
+					&cz_smu->toc_buffer.kaddr);
+		return -EINVAL;
+	}
+
+	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
+		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+		UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
+		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
+		pr_err("Error when Populate Firmware Entry.\n");
+		return -1;
+	}
+
+	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
+		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+		UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
+		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
+		pr_err("Error when Populate Firmware Entry.\n");
+		return -1;
+	}
+	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
+		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+		UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
+		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
+		pr_err("Error when Populate Firmware Entry.\n");
+		return -1;
+	}
+
+	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
+		CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+		sizeof(struct SMU8_MultimediaPowerLogData),
+		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
+		pr_err("Error when Populate Firmware Entry.\n");
+		return -1;
+	}
+
+	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
+		CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+		sizeof(struct SMU8_Fusion_ClkTable),
+		&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
+		pr_err("Error when Populate Firmware Entry.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int cz_smu_fini(struct pp_hwmgr *hwmgr)
+{
+	struct cz_smumgr *cz_smu;
+
+	if (hwmgr == NULL || hwmgr->device == NULL)
+		return -EINVAL;
+
+	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+	if (cz_smu) {
+		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+					&cz_smu->toc_buffer.mc_addr,
+					&cz_smu->toc_buffer.kaddr);
+		amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
+					&cz_smu->smu_buffer.mc_addr,
+					&cz_smu->smu_buffer.kaddr);
+		kfree(cz_smu);
+	}
+
+	return 0;
+}
+
+static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
+				unsigned long check_feature)
+{
+	int result;
+	unsigned long features;
+
+	result = cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+	if (result == 0) {
+		features = smum_get_argument(hwmgr);
+		if (features & check_feature)
+			return true;
+	}
+
+	return false;
+}
+
+static bool cz_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
+		return true;
+	return false;
+}
+
+const struct pp_smumgr_func cz_smu_funcs = {
+	.smu_init = cz_smu_init,
+	.smu_fini = cz_smu_fini,
+	.start_smu = cz_start_smu,
+	.check_fw_load_finish = cz_check_fw_load_finish,
+	.request_smu_load_fw = NULL,
+	.request_smu_load_specific_fw = NULL,
+	.get_argument = cz_smum_get_argument,
+	.send_msg_to_smc = cz_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
+	.download_pptable_settings = cz_download_pptable_settings,
+	.upload_pptable_settings = cz_upload_pptable_settings,
+	.is_dpm_running = cz_is_dpm_running,
+};
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
new file mode 100644
index 0000000..c13ab83
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/* NOTE(review): guard is still _CZ_SMUMGR_H_ although the file is now
+ * smu8_smumgr.h — this is a rename-only move; consider renaming the guard
+ * together with the cz_* identifiers in a follow-up patch. */
+#ifndef _CZ_SMUMGR_H_
+#define _CZ_SMUMGR_H_
+
+
+#define MAX_NUM_FIRMWARE                        8
+#define MAX_NUM_SCRATCH                         11
+/* sizes of the per-purpose SMU scratch data regions */
+#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING      1024
+#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING    2048
+#define CZ_SCRATCH_SIZE_SDMA_METADATA           1024
+#define CZ_SCRATCH_SIZE_IH                      ((2*256+1)*4)
+
+/* bit in the SMU feature status bitmask indicating SCLK DPM is enabled */
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn    0x00200000
+
+/*
+ * IDs of the firmware/data entries tracked for the SMU; also used as the
+ * firmware_ID tag of a cz_buffer_entry.
+ */
+enum cz_scratch_entry {
+	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
+	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+	CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
+	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
+	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
+	CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
+	CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
+	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
+	CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
+	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
+	CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
+	CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
+};
+
+/* One SMU-visible buffer: a kernel BO plus its size, GPU (MC) address,
+ * CPU kernel mapping and the entry ID it belongs to. */
+struct cz_buffer_entry {
+	uint32_t data_size;
+	uint64_t mc_addr;
+	void *kaddr;
+	enum cz_scratch_entry firmware_ID;
+	struct amdgpu_bo *handle; /* as bo handle used when release bo */
+};
+
+/* Register offset/value pair, used by cz_ih_meta_data below. */
+struct cz_register_index_data_pair {
+	uint32_t offset;
+	uint32_t value;
+};
+
+/* IH register-restore metadata: a command followed by register pairs.
+ * NOTE(review): [1] appears to be an old-style variable-length trailing
+ * array; a C99 flexible array member ([]) would be preferable — kept
+ * as-is in this rename-only patch. */
+struct cz_ih_meta_data {
+	uint32_t command;
+	struct cz_register_index_data_pair register_index_value_pair[1];
+};
+
+/* Private backend state stored in hwmgr->smu_backend for SMU8 (Carrizo). */
+struct cz_smumgr {
+	uint8_t driver_buffer_length;
+	uint8_t scratch_buffer_length;
+	uint16_t toc_entry_used_count;
+	uint16_t toc_entry_initialize_index;
+	uint16_t toc_entry_power_profiling_index;
+	uint16_t toc_entry_aram;
+	uint16_t toc_entry_ih_register_restore_task_index;
+	uint16_t toc_entry_clock_table;
+	uint16_t ih_register_restore_task_size;
+	uint16_t smu_buffer_used_bytes;
+
+	struct cz_buffer_entry toc_buffer;
+	struct cz_buffer_entry smu_buffer;
+	struct cz_buffer_entry firmware_buffer;
+	struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
+	struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
+	struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
+};
+
+#endif
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_*
       [not found] ` <1520331047-10759-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
  2018-03-06 10:10   ` [PATCH 3/4] drm/amd/pp: Replace files name cz_* with smu8_* Rex Zhu
@ 2018-03-06 16:41   ` Alex Deucher
       [not found]     ` <CADnq5_MiPCNrXnm3Ro9r4wW25HLar=13BhC5hxq+yMPK-hi7gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  1 sibling, 1 reply; 4+ messages in thread
From: Alex Deucher @ 2018-03-06 16:41 UTC (permalink / raw)
  To: Rex Zhu; +Cc: amd-gfx list

On Tue, Mar 6, 2018 at 5:10 AM, Rex Zhu <Rex.Zhu@amd.com> wrote:
> Powerplay is for the hw ip smu, for RV, smu10 is used,
> so use smu10 as the prefix of the files name.
>
> Change-Id: Icf9c8a9d4b5deccd4fbfb9ecfab443db79934c77
> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile       |    2 +-
>  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     | 1075 -------------------
>  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h     |  322 ------
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 1076 ++++++++++++++++++++
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h  |  322 ++++++
>  drivers/gpu/drm/amd/powerplay/smumgr/Makefile      |    2 +-
>  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c   |  399 --------
>  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h   |   61 --
>  .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c    |  399 ++++++++
>  .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h    |   61 ++
>  10 files changed, 1860 insertions(+), 1859 deletions(-)
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
>  create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
>  create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
>  create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
>  create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h


I'm ok with patches 1-3, although I think we should work on renaming
the functions eventually too to match the file names.  For patch 4, I
think we should stick with the asic names.  The smu7 variants are too
confusing.  I can't remember if CI was 7.0.1 or 7.0.2, etc.  I'd
prefer to align the file names to the smu interface headers (e.g.,
smu7, smu71, smu72, smu73, smu74).  BTW, did you use git mv to
generate these patches?

Alex
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

* RE: [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_*
       [not found]     ` <CADnq5_MiPCNrXnm3Ro9r4wW25HLar=13BhC5hxq+yMPK-hi7gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-03-07 11:48       ` Zhu, Rex
  0 siblings, 0 replies; 4+ messages in thread
From: Zhu, Rex @ 2018-03-07 11:48 UTC (permalink / raw)
  To: 'Alex Deucher'; +Cc: amd-gfx list

>For patch 4, I think we should stick with the asic names.  The smu7 variants are too confusing.  I can't remember if CI was 7.0.1 or 7.0.2, etc.  I'd prefer to align the file names to the 
>smu interface headers (e.g., smu7, smu71, smu72, smu73, smu74).  

Rex: For patch4, I am ok with the asic names for smu7. So I will drop this patch.

>BTW, did you use git mv to generate these patches?

Rex: No, I just renamed the files and ran git add -A; git status then showed the renames.

Best Regards
Rex 


-----Original Message-----
From: Alex Deucher [mailto:alexdeucher@gmail.com] 
Sent: Wednesday, March 07, 2018 12:42 AM
To: Zhu, Rex
Cc: amd-gfx list
Subject: Re: [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_*

On Tue, Mar 6, 2018 at 5:10 AM, Rex Zhu <Rex.Zhu@amd.com> wrote:
> Powerplay is for the hw ip smu, for RV, smu10 is used, so use smu10 as 
> the prefix of the files name.
>
> Change-Id: Icf9c8a9d4b5deccd4fbfb9ecfab443db79934c77
> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile       |    2 +-
>  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     | 1075 -------------------
>  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h     |  322 ------
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 1076 
> ++++++++++++++++++++  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h  |  322 ++++++
>  drivers/gpu/drm/amd/powerplay/smumgr/Makefile      |    2 +-
>  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c   |  399 --------
>  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h   |   61 --
>  .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c    |  399 ++++++++
>  .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h    |   61 ++
>  10 files changed, 1860 insertions(+), 1859 deletions(-)  delete mode 
> 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
>  create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
>  create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
>  delete mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
>  create mode 100644 
> drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
>  create mode 100644 
> drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h


I'm ok with patches 1-3, although I think we should work on renaming the functions eventually too to match the file names.  For patch 4, I think we should stick with the asic names.  The smu7 variants are too confusing.  I can't remember if CI was 7.0.1 or 7.0.2, etc.  I'd prefer to align the file names to the smu interface headers (e.g., smu7, smu71, smu72, smu73, smu74).  BTW, did you use git mv to generate these patches?

Alex
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2018-03-07 11:48 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-06 10:10 [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_* Rex Zhu
     [not found] ` <1520331047-10759-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
2018-03-06 10:10   ` [PATCH 3/4] drm/amd/pp: Replace files name cz_* with smu8_* Rex Zhu
2018-03-06 16:41   ` [PATCH 1/4] drm/amd/pp: Replace files name rv_* with smu10_* Alex Deucher
     [not found]     ` <CADnq5_MiPCNrXnm3Ro9r4wW25HLar=13BhC5hxq+yMPK-hi7gg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-03-07 11:48       ` Zhu, Rex

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.