All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 01/13] drm/amd/powerplay: correct vega12 bootup values settings
@ 2018-06-19  7:38 Evan Quan
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

The vbios firmware structure changed between v3_1 and v3_2. So,
the code that sets up the bootup values needs to take different
paths based on the header version.

Change-Id: I15140c4d80a91022f66a5052f4b9303fdab4ed9d
Signed-off-by: Evan Quan <evan.quan@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 94 +++++++++++++++++++---
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h |  3 +
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c |  3 +
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |  3 +
 4 files changed, 91 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661..aa2faff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 	return 0;
 }
 
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+			struct atom_firmware_info_v3_2 *fw_info)
+{
+	uint32_t frequency = 0;
+
+	boot_values->ulRevision = fw_info->firmware_revision;
+	boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+	boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+	boot_values->usVddc     = fw_info->bootup_vddc_mv;
+	boot_values->usVddci    = fw_info->bootup_vddci_mv;
+	boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+	boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+	boot_values->ucCoolingID = fw_info->coolingsolution_id;
+	boot_values->ulSocClk   = 0;
+	boot_values->ulDCEFClk   = 0;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+		boot_values->ulSocClk   = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+		boot_values->ulDCEFClk  = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+		boot_values->ulEClk     = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+		boot_values->ulVClk     = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+		boot_values->ulDClk     = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+			struct atom_firmware_info_v3_1 *fw_info)
+{
+	uint32_t frequency = 0;
+
+	boot_values->ulRevision = fw_info->firmware_revision;
+	boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+	boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+	boot_values->usVddc     = fw_info->bootup_vddc_mv;
+	boot_values->usVddci    = fw_info->bootup_vddci_mv;
+	boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+	boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+	boot_values->ucCoolingID = fw_info->coolingsolution_id;
+	boot_values->ulSocClk   = 0;
+	boot_values->ulDCEFClk   = 0;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+		boot_values->ulSocClk   = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+		boot_values->ulDCEFClk  = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+		boot_values->ulEClk     = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+		boot_values->ulVClk     = frequency;
+
+	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+		boot_values->ulDClk     = frequency;
+}
+
 int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_bios_boot_up_values *boot_values)
 {
-	struct atom_firmware_info_v3_1 *info = NULL;
+	struct atom_firmware_info_v3_2 *fwinfo_3_2;
+	struct atom_firmware_info_v3_1 *fwinfo_3_1;
+	struct atom_common_table_header *info = NULL;
 	uint16_t ix;
 
 	ix = GetIndexIntoMasterDataTable(firmwareinfo);
-	info = (struct atom_firmware_info_v3_1 *)
+	info = (struct atom_common_table_header *)
 		smu_atom_get_data_table(hwmgr->adev,
 				ix, NULL, NULL, NULL);
 
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 		return -EINVAL;
 	}
 
-	boot_values->ulRevision = info->firmware_revision;
-	boot_values->ulGfxClk   = info->bootup_sclk_in10khz;
-	boot_values->ulUClk     = info->bootup_mclk_in10khz;
-	boot_values->usVddc     = info->bootup_vddc_mv;
-	boot_values->usVddci    = info->bootup_vddci_mv;
-	boot_values->usMvddc    = info->bootup_mvddc_mv;
-	boot_values->usVddGfx   = info->bootup_vddgfx_mv;
-	boot_values->ucCoolingID = info->coolingsolution_id;
-	boot_values->ulSocClk   = 0;
-	boot_values->ulDCEFClk   = 0;
+	if ((info->format_revision == 3) && (info->content_revision == 2)) {
+		fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+		pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+				boot_values, fwinfo_3_2);
+	} else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+		fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+		pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+				boot_values, fwinfo_3_1);
+	} else {
+		pr_info("Fw info table revision does not match!");
+		return -EINVAL;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4..745bd38 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
 	uint32_t   ulUClk;
 	uint32_t   ulSocClk;
 	uint32_t   ulDCEFClk;
+	uint32_t   ulEClk;
+	uint32_t   ulVClk;
+	uint32_t   ulDClk;
 	uint16_t   usVddc;
 	uint16_t   usVddci;
 	uint16_t   usMvddc;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e209..e81661cc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -803,6 +803,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
 		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
 		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
 		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetMinDeepSleepDcefclk,
 			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1..49b38df 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
 	uint32_t    mem_clock;
 	uint32_t    soc_clock;
 	uint32_t    dcef_clock;
+	uint32_t    eclock;
+	uint32_t    dclock;
+	uint32_t    vclock;
 };
 
 #define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 02/13] drm/amd/powerplay: smc_dpm_info structure change
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-2-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 03/13] drm/amd/powerplay: drop the acg fix Evan Quan
                     ` (10 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

A new member Vr2_I2C_address is added.

Change-Id: I9821365721c9d73e1d2df2f65dfa97f39f0425c6
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/include/atomfirmware.h                   | 5 ++++-
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c           | 2 ++
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h           | 2 ++
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2 ++
 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h    | 5 ++++-
 5 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800..33b4de4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
 	uint8_t  acggfxclkspreadpercent;
 	uint16_t acggfxclkspreadfreq;
 
-	uint32_t boardreserved[10];
+	uint8_t Vr2_I2C_address;
+	uint8_t padding_vr2[3];
+
+	uint32_t boardreserved[9];
 };
 
 /* 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index aa2faff..d27c1c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -699,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 	param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
 	param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
 
+	param->Vr2_I2C_address = info->Vr2_I2C_address;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 745bd38..22e2166 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -210,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
 	uint8_t  acggfxclkspreadenabled;
 	uint8_t  acggfxclkspreadpercent;
 	uint16_t acggfxclkspreadfreq;
+
+	uint8_t Vr2_I2C_address;
 };
 
 int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca..2991470 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
 	}
 
+	ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b9..b08526f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -499,7 +499,10 @@ typedef struct {
 	uint8_t      AcgGfxclkSpreadPercent;
 	uint16_t     AcgGfxclkSpreadFreq;
 
-	uint32_t     BoardReserved[10];
+  uint8_t      Vr2_I2C_address;
+  uint8_t      padding_vr2[3];
+
+  uint32_t     BoardReserved[9];
 
 
   uint32_t     MmHubPadding[7];
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 03/13] drm/amd/powerplay: drop the acg fix
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 02/13] drm/amd/powerplay: smc_dpm_info structure change Evan Quan
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-3-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 04/13] drm/amd/powerplay: revise default dpm tables setup Evan Quan
                     ` (9 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

This workaround is not needed any more.

Change-Id: I81cb20ecd52d242af26ca32860baacdb5ec126c9
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 2991470..f4f366b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,12 +224,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 	ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
 	ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
 
-	/* 0xFFFF will disable the ACG feature */
-	if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
-		ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
-		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
-	}
-
 	ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
 
 	return 0;
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 04/13] drm/amd/powerplay: revise default dpm tables setup
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 02/13] drm/amd/powerplay: smc_dpm_info structure change Evan Quan
  2018-06-19  7:38   ` [PATCH 03/13] drm/amd/powerplay: drop the acg fix Evan Quan
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-4-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 05/13] drm/amd/powerplay: retrieve all clock ranges on startup Evan Quan
                     ` (8 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

Initialize the soft/hard min/max level correctly and
handle the dpm disabled situation.

Change-Id: I9a1d303ee54ac4c9687f72c86097b008ae398c05
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 334 ++++++++-------------
 1 file changed, 132 insertions(+), 202 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index e81661cc..bc976e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -453,37 +453,30 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
  */
 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
 {
-	dpm_state->soft_min_level = 0xff;
-	dpm_state->soft_max_level = 0xff;
-	dpm_state->hard_min_level = 0xff;
-	dpm_state->hard_max_level = 0xff;
+	dpm_state->soft_min_level = 0x0;
+	dpm_state->soft_max_level = 0xffff;
+	dpm_state->hard_min_level = 0x0;
+	dpm_state->hard_max_level = 0xffff;
 }
 
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
-		PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+		PPCLK_e clk_id, uint32_t *num_of_levels)
 {
-	int result;
-	/*
-	 * SMU expects the Clock ID to be in the top 16 bits.
-	 * Lower 16 bits specify the level however 0xFF is a
-	 * special argument the returns the total number of levels
-	 */
-	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-		PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
-		"[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
-		return -EINVAL);
-
-	result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+	int ret = 0;
 
-	PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
-		"[GetNumberDPMLevel] Number of DPM levels is greater than limit",
-		return -EINVAL);
+	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_GetDpmFreqByIndex,
+			(clk_id << 16 | 0xFF));
+	PP_ASSERT_WITH_CODE(!ret,
+			"[GetNumOfDpmLevel] failed to get dpm levels!",
+			return ret);
 
-	PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
-		"[GetNumberDPMLevel] Number of CLK Levels is zero!",
-		return -EINVAL);
+	vega12_read_arg_from_smc(hwmgr, num_of_levels);
+	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+			"[GetNumOfDpmLevel] number of clk levels is invalid!",
+			return -EINVAL);
 
-	return result;
+	return ret;
 }
 
 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
@@ -509,6 +502,31 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+	int ret = 0;
+	uint32_t i, num_of_levels, clk;
+
+	ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+	PP_ASSERT_WITH_CODE(!ret,
+			"[SetupSingleDpmTable] failed to get clk levels!",
+			return ret);
+
+	dpm_table->count = num_of_levels;
+
+	for (i = 0; i < num_of_levels; i++) {
+		ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+		PP_ASSERT_WITH_CODE(!ret,
+			"[SetupSingleDpmTable] failed to get clk of specific level!",
+			return ret);
+		dpm_table->dpm_levels[i].value = clk;
+		dpm_table->dpm_levels[i].enabled = true;
+	}
+
+	return ret;
+}
+
 /*
  * This function is to initialize all DPM state tables
  * for SMU based on the dependency table.
@@ -519,224 +537,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
  */
 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
-	uint32_t num_levels, i, clock;
 
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
-
 	struct vega12_single_dpm_table *dpm_table;
+	int ret = 0;
 
 	memset(&data->dpm_table, 0, sizeof(data->dpm_table));
 
-	/* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+	/* socclk */
 	dpm_table = &(data->dpm_table.soc_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_SOCCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* gfxclk */
 	dpm_table = &(data->dpm_table.gfx_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_GFXCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
-	/* Initialize Mclk DPM table based on allow Mclk values */
-	dpm_table = &(data->dpm_table.mem_table);
 
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_UCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	/* memclk */
+	dpm_table = &(data->dpm_table.mem_table);
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* eclk */
 	dpm_table = &(data->dpm_table.eclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-		PPCLK_ECLK, i, &clock) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-		return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* vclk */
 	dpm_table = &(data->dpm_table.vclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_VCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* dclk */
 	dpm_table = &(data->dpm_table.dclk_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
-		&num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DCLK, i, &clock) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-		return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
-	/* Assume there is no headless Vega12 for now */
+	/* dcefclk */
 	dpm_table = &(data->dpm_table.dcef_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_DCEFCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DCEFCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+				return ret);
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
 	}
-
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* pixclk */
 	dpm_table = &(data->dpm_table.pixel_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_PIXCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_PIXCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* dispclk */
 	dpm_table = &(data->dpm_table.display_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_DISPCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_DISPCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+	/* phyclk */
 	dpm_table = &(data->dpm_table.phy_table);
-
-	PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-		PPCLK_PHYCLK, &num_levels) == 0,
-		"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-		return -EINVAL);
-
-	dpm_table->count = num_levels;
-
-	for (i = 0; i < num_levels; i++) {
-		PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-			PPCLK_PHYCLK, i, &clock) == 0,
-			"[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-			return -EINVAL);
-
-		dpm_table->dpm_levels[i].value = clock;
-		dpm_table->dpm_levels[i].enabled = true;
-	}
-
+	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+		ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+		PP_ASSERT_WITH_CODE(!ret,
+				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+				return ret);
+	} else
+		dpm_table->count = 0;
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
 
 	/* save a copy of the default DPM table */
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 05/13] drm/amd/powerplay: retrieve all clock ranges on startup
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (2 preceding siblings ...)
  2018-06-19  7:38   ` [PATCH 04/13] drm/amd/powerplay: revise default dpm tables setup Evan Quan
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-5-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 06/13] drm/amd/powerplay: revise clock level setup Evan Quan
                     ` (7 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

So that we do not need to use PPSMC_MSG_GetMin/MaxDpmFreq to
get the clock ranges at runtime, since doing that causes some problems.

Change-Id: Ia0d6390c976749538b35c8ffde5d1e661b4944c0
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 69 +++++++++++++++++-----
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |  8 +++
 2 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index bc976e1..ea530af 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -856,6 +856,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
 	return result;
 }
 
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+		PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+	/* AC Max */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get max ac clock from SMC!",
+		return -1);
+	vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
+
+	/* AC Min */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get min ac clock from SMC!",
+		return -1);
+	vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
+
+	/* DC Max */
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+		"[GetClockRanges] Failed to get max dc clock from SMC!",
+		return -1);
+	vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
+
+	return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+	uint32_t i;
+
+	for (i = 0; i < PPCLK_COUNT; i++)
+		PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+					i, &(data->clk_range[i])),
+				"Failed to get clk range from SMC!",
+				return -1);
+
+	return 0;
+}
+
 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
@@ -883,6 +925,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"Failed to power control set level!",
 			result = tmp_result);
 
+	result = vega12_get_all_clock_ranges(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to get all clock ranges!",
+			return result);
+
 	result = vega12_odn_initialize_default_settings(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to power control set level!",
@@ -1472,24 +1519,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
 		PPCLK_e clock_select,
 		bool max)
 {
-	int result;
-	*clock = 0;
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
-	if (max) {
-		 PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
-			"[GetClockRanges] Failed to get max clock from SMC!",
-			return -1);
-		result = vega12_read_arg_from_smc(hwmgr, clock);
-	} else {
-		PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
-			"[GetClockRanges] Failed to get min clock from SMC!",
-			return -1);
-		result = vega12_read_arg_from_smc(hwmgr, clock);
-	}
+	if (max)
+		*clock = data->clk_range[clock_select].ACMax;
+	else
+		*clock = data->clk_range[clock_select].ACMin;
 
-	return result;
+	return 0;
 }
 
 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index 49b38df..e18c083 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -304,6 +304,12 @@ struct vega12_odn_fan_table {
 	bool		force_fan_pwm;
 };
 
+struct vega12_clock_range {
+	uint32_t	ACMax;
+	uint32_t	ACMin;
+	uint32_t	DCMax;
+};
+
 struct vega12_hwmgr {
 	struct vega12_dpm_table          dpm_table;
 	struct vega12_dpm_table          golden_dpm_table;
@@ -385,6 +391,8 @@ struct vega12_hwmgr {
 	uint32_t                       smu_version;
 	struct smu_features            smu_features[GNLD_FEATURES_MAX];
 	struct vega12_smc_state_table  smc_state_table;
+
+	struct vega12_clock_range      clk_range[PPCLK_COUNT];
 };
 
 #define VEGA12_DPM2_NEAR_TDP_DEC                      10
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 06/13] drm/amd/powerplay: revise clock level setup
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (3 preceding siblings ...)
  2018-06-19  7:38   ` [PATCH 05/13] drm/amd/powerplay: retrieve all clock ranges on startup Evan Quan
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-6-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:38   ` [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status Evan Quan
                     ` (6 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

Make sure the clock levels are set only when DPM is enabled. The UVD/VCE/SOC
clocks are also changed correspondingly.

Change-Id: I1db2e2ac355fd5aea1c0a25c2b140d039a590089
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 318 ++++++++++++++-------
 1 file changed, 211 insertions(+), 107 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index ea530af..a124b81 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -958,76 +958,172 @@ static uint32_t vega12_find_lowest_dpm_level(
 			break;
 	}
 
+	if (i >= table->count) {
+		i = 0;
+		table->dpm_levels[i].enabled = true;
+	}
+
 	return i;
 }
 
 static uint32_t vega12_find_highest_dpm_level(
 		struct vega12_single_dpm_table *table)
 {
-	uint32_t i = 0;
+	int32_t i = 0;
+	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+			"[FindHighestDPMLevel] DPM Table has too many entries!",
+			return MAX_REGULAR_DPM_NUMBER - 1);
 
-	if (table->count <= MAX_REGULAR_DPM_NUMBER) {
-		for (i = table->count; i > 0; i--) {
-			if (table->dpm_levels[i - 1].enabled)
-				return i - 1;
-		}
-	} else {
-		pr_info("DPM Table Has Too Many Entries!");
-		return MAX_REGULAR_DPM_NUMBER - 1;
+	for (i = table->count - 1; i >= 0; i--) {
+		if (table->dpm_levels[i].enabled)
+			break;
 	}
 
-	return i;
+	if (i < 0) {
+		i = 0;
+		table->dpm_levels[i].enabled = true;
+	}
+
+	return (uint32_t)i;
 }
 
 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = hwmgr->backend;
-	if (data->smc_state_table.gfx_boot_level !=
-			data->dpm_table.gfx_table.dpm_state.soft_min_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMinByFreq,
-			PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
-		data->dpm_table.gfx_table.dpm_state.soft_min_level =
-				data->smc_state_table.gfx_boot_level;
+	uint32_t min_freq;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min gfxclk !",
+					return ret);
 	}
 
-	if (data->smc_state_table.mem_boot_level !=
-			data->dpm_table.mem_table.dpm_state.soft_min_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMinByFreq,
-			PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
-		data->dpm_table.mem_table.dpm_state.soft_min_level =
-				data->smc_state_table.mem_boot_level;
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min memclk !",
+					return ret);
+
+		min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetHardMinByFreq,
+					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set hard min memclk !",
+					return ret);
 	}
 
-	return 0;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min vclk!",
+					return ret);
+
+		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min dclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min eclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+					"Failed to set soft min socclk!",
+					return ret);
+	}
+
+	return ret;
 
 }
 
 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = hwmgr->backend;
-	if (data->smc_state_table.gfx_max_level !=
-		data->dpm_table.gfx_table.dpm_state.soft_max_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMaxByFreq,
-			/* plus the vale by 1 to align the resolution */
-			PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
-		data->dpm_table.gfx_table.dpm_state.soft_max_level =
-				data->smc_state_table.gfx_max_level;
+	uint32_t max_freq;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max gfxclk!",
+					return ret);
 	}
 
-	if (data->smc_state_table.mem_max_level !=
-		data->dpm_table.mem_table.dpm_state.soft_max_level) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMaxByFreq,
-			/* plus the vale by 1 to align the resolution */
-			PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
-		data->dpm_table.mem_table.dpm_state.soft_max_level =
-				data->smc_state_table.mem_max_level;
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max memclk!",
+					return ret);
 	}
 
-	return 0;
+	if (data->smu_features[GNLD_DPM_UVD].enabled) {
+		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max vclk!",
+					return ret);
+
+		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max dclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled) {
+		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max eclk!",
+					return ret);
+	}
+
+	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+					"Failed to set soft max socclk!",
+					return ret);
+	}
+
+	return ret;
 }
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
@@ -1330,12 +1426,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
 
-	data->smc_state_table.gfx_boot_level =
-	data->smc_state_table.gfx_max_level =
-			vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-	data->smc_state_table.mem_max_level =
-			vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+	uint32_t soft_level;
+
+	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+	data->dpm_table.gfx_table.dpm_state.soft_min_level =
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+	soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+	data->dpm_table.mem_table.dpm_state.soft_min_level =
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+		data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload boot level to highest!",
@@ -1352,13 +1455,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
+	uint32_t soft_level;
+
+	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+
+	data->dpm_table.gfx_table.dpm_state.soft_min_level =
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
 
-	data->smc_state_table.gfx_boot_level =
-	data->smc_state_table.gfx_max_level =
-			vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-	data->smc_state_table.mem_max_level =
-			vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+	soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+	data->dpm_table.mem_table.dpm_state.soft_min_level =
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+		data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload boot level to highest!",
@@ -1374,17 +1483,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 
 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
-	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-	data->smc_state_table.gfx_boot_level =
-			vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.gfx_max_level =
-			vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-	data->smc_state_table.mem_boot_level =
-			vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
-	data->smc_state_table.mem_max_level =
-			vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
 			"Failed to upload DPM Bootup Levels!",
 			return -1);
@@ -1392,22 +1490,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
 			"Failed to upload DPM Max Levels!",
 			return -1);
+
 	return 0;
 }
 
-#if 0
 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
 {
-	struct phm_ppt_v2_information *table_info =
-			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+	struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
+
+	*sclk_mask = 0;
+	*mclk_mask = 0;
+	*soc_mask  = 0;
 
-	if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
-		table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
-		table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+	    soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
 		*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
-		*soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
 		*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+		*soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
 	}
 
 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1415,13 +1519,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
 		*mclk_mask = 0;
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
-		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
-		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
-		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+		*sclk_mask = gfx_dpm_table->count - 1;
+		*mclk_mask = mem_dpm_table->count - 1;
+		*soc_mask  = soc_dpm_table->count - 1;
 	}
+
 	return 0;
 }
-#endif
 
 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
@@ -1445,11 +1549,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
 	int ret = 0;
-#if 0
 	uint32_t sclk_mask = 0;
 	uint32_t mclk_mask = 0;
 	uint32_t soc_mask = 0;
-#endif
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1465,27 +1567,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
 		ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
 		if (ret)
 			return ret;
-		vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
-		vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+		vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+		vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
 		break;
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
 	}
-#if 0
-	if (!ret) {
-		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-			vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
-		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-			vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-	}
-#endif
+
 	return ret;
 }
 
@@ -1745,37 +1838,48 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
+	uint32_t soft_min_level, soft_max_level;
+	int ret = 0;
 
 	switch (type) {
 	case PP_SCLK:
-		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+		soft_min_level = mask ? (ffs(mask) - 1) : 0;
+		soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+		data->dpm_table.gfx_table.dpm_state.soft_min_level =
+			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+		ret = vega12_upload_dpm_min_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to lowest!",
-			return -EINVAL);
+			return ret);
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+		ret = vega12_upload_dpm_max_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
+			return ret);
 		break;
 
 	case PP_MCLK:
-		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+		soft_min_level = mask ? (ffs(mask) - 1) : 0;
+		soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+		data->dpm_table.mem_table.dpm_state.soft_min_level =
+			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+		ret = vega12_upload_dpm_min_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to lowest!",
-			return -EINVAL);
+			return ret);
 
-		PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+		ret = vega12_upload_dpm_max_level(hwmgr);
+		PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
+			return ret);
 
 		break;
 
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (4 preceding siblings ...)
  2018-06-19  7:38   ` [PATCH 06/13] drm/amd/powerplay: revise clock level setup Evan Quan
@ 2018-06-19  7:38   ` Evan Quan
       [not found]     ` <1529393945-16629-7-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 08/13] drm/amd/powerplay: correct smc display config setting Evan Quan
                     ` (5 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:38 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

When UVD/VCE DPM is disabled, the powergate status should be
set to true.

Change-Id: I569a5aa216b5e7d64a2b504f2ff98cc83ca802d5
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index a124b81..cb0589e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -777,6 +777,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = true;
+	data->vce_power_gated = true;
+
+	if (data->smu_features[GNLD_DPM_UVD].enabled)
+		data->uvd_power_gated = false;
+
+	if (data->smu_features[GNLD_DPM_VCE].enabled)
+		data->vce_power_gated = false;
+}
+
 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data =
@@ -801,6 +816,8 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 		}
 	}
 
+	vega12_init_powergate_state(hwmgr);
+
 	return 0;
 }
 
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 08/13] drm/amd/powerplay: correct smc display config setting
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (5 preceding siblings ...)
  2018-06-19  7:38   ` [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-8-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 09/13] drm/amd/powerplay: correct vega12 max num of dpm level Evan Quan
                     ` (4 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

The multi-monitor situation should be taken into consideration.
Also, there is no need to set up a UCLK hard minimum clock level.

Change-Id: Icf1bc9b420a40433338d9071e386308d30999491
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index cb0589e..4732179 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1399,9 +1399,9 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 			(struct vega12_hwmgr *)(hwmgr->backend);
 	struct PP_Clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
-	uint32_t clk_request;
 
-	if (hwmgr->display_config->num_display > 1)
+	if ((hwmgr->display_config->num_display > 1) &&
+		!hwmgr->display_config->multi_monitor_in_sync)
 		vega12_notify_smc_display_change(hwmgr, false);
 	else
 		vega12_notify_smc_display_change(hwmgr, true);
@@ -1426,15 +1426,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 		}
 	}
 
-	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
-		clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
-		PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
-			"[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
-			return -1);
-		data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
-	}
-
 	return 0;
 }
 
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 09/13] drm/amd/powerplay: correct vega12 max num of dpm level
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (6 preceding siblings ...)
  2018-06-19  7:39   ` [PATCH 08/13] drm/amd/powerplay: correct smc display config setting Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-9-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change Evan Quan
                     ` (3 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

Use MAX_NUM_CLOCKS instead of VG12_PSUEDO* macros for
the max number of dpm levels.

Change-Id: Ida49f51777663a8d68d05ddcd41f4df0d8e61481
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 4732179..a227ace 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1642,8 +1642,8 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
 		return -1;
 
 	dpm_table = &(data->dpm_table.gfx_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
@@ -1674,11 +1674,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
 		return -1;
 
 	dpm_table = &(data->dpm_table.mem_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
+			data->mclk_latency_table.entries[i].frequency =
 			dpm_table->dpm_levels[i].value * 100;
 
 		clocks->data[i].latency_in_us =
@@ -1704,8 +1705,8 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
 
 
 	dpm_table = &(data->dpm_table.dcef_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
@@ -1732,8 +1733,8 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
 
 
 	dpm_table = &(data->dpm_table.soc_table);
-	ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
-		VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+	ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+		MAX_NUM_CLOCKS : dpm_table->count;
 
 	for (i = 0; i < ucount; i++) {
 		clocks->data[i].clocks_in_khz =
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (7 preceding siblings ...)
  2018-06-19  7:39   ` [PATCH 09/13] drm/amd/powerplay: correct vega12 max num of dpm level Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-10-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 11/13] drm/amd/powerplay: set vega12 pre display configurations Evan Quan
                     ` (2 subsequent siblings)
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

The clocks' hard/soft min/max levels will be adjusted
correspondingly.

Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
 2 files changed, 164 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index a227ace..26bdfff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 	return size;
 }
 
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	struct vega12_single_dpm_table *dpm_table;
+	bool vblank_too_short = false;
+	bool disable_mclk_switching;
+	uint32_t i, latency;
+
+	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+			          !hwmgr->display_config->multi_monitor_in_sync) ||
+			          vblank_too_short;
+	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+	/* gfxclk */
+	dpm_table = &(data->dpm_table.gfx_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* memclk */
+	dpm_table = &(data->dpm_table.mem_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* honour DAL's UCLK Hardmin */
+	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+	/* Hardmin is dependent on displayconfig */
+	if (disable_mclk_switching) {
+		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+			if (data->mclk_latency_table.entries[i].latency <= latency) {
+				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+					break;
+				}
+			}
+		}
+	}
+
+	if (hwmgr->display_config->nb_pstate_switch_disable)
+		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	/* vclk */
+	dpm_table = &(data->dpm_table.vclk_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* dclk */
+	dpm_table = &(data->dpm_table.dclk_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* socclk */
+	dpm_table = &(data->dpm_table.soc_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	/* eclk */
+	dpm_table = &(data->dpm_table.eclk_table);
+	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+		if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+		}
+
+		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		}
+	}
+
+	return 0;
+}
+
 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -2196,6 +2356,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.display_clock_voltage_request = vega12_display_clock_voltage_request,
 	.force_clock_level = vega12_force_clock_level,
 	.print_clock_levels = vega12_print_clock_levels,
+	.apply_clocks_adjust_rules =
+		vega12_apply_clocks_adjust_rules,
 	.display_config_changed = vega12_display_configuration_changed_task,
 	.powergate_uvd = vega12_power_gate_uvd,
 	.powergate_vce = vega12_power_gate_vce,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e18c083..e17237c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -443,6 +443,8 @@ struct vega12_hwmgr {
 #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
 
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 11/13] drm/amd/powerplay: set vega12 pre display configurations
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (8 preceding siblings ...)
  2018-06-19  7:39   ` [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-11-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 12/13] drm/amd/powerplay: correct vega12 thermal support as true Evan Quan
  2018-06-19  7:39   ` [PATCH 13/13] drm/amd/powerplay: cosmetic fix Evan Quan
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

PPSMC_MSG_NumOfDisplays is set to 0 and uclk is forced to the
highest DPM level.

Change-Id: I2400279d3c979d99f4dd4b8d53f051cd8f8e0c33
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 41 ++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 26bdfff..1fadb71 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2110,6 +2110,45 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+		struct vega12_single_dpm_table *dpm_table)
+{
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	int ret = 0;
+
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+				"[SetUclkToHightestDpmLevel] Dpm table has no entry!",
+				return -EINVAL);
+		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+				"[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
+				return -EINVAL);
+
+		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinByFreq,
+				(PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+				"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
+				return ret);
+	}
+
+	return ret;
+}
+
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+	int ret = 0;
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_NumOfDisplays, 0);
+
+	ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+			&data->dpm_table.mem_table);
+
+	return ret;
+}
+
 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -2358,6 +2397,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.print_clock_levels = vega12_print_clock_levels,
 	.apply_clocks_adjust_rules =
 		vega12_apply_clocks_adjust_rules,
+	.pre_display_config_changed =
+		vega12_pre_display_configuration_changed_task,
 	.display_config_changed = vega12_display_configuration_changed_task,
 	.powergate_uvd = vega12_power_gate_uvd,
 	.powergate_vce = vega12_power_gate_vce,
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 12/13] drm/amd/powerplay: correct vega12 thermal support as true
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (9 preceding siblings ...)
  2018-06-19  7:39   ` [PATCH 11/13] drm/amd/powerplay: set vega12 pre display configurations Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-12-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  2018-06-19  7:39   ` [PATCH 13/13] drm/amd/powerplay: cosmetic fix Evan Quan
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

Thermal support is enabled on vega12.

Change-Id: I7069a65c6b289dbfe4a12f81ff96e943e878e6fa
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 1fadb71..de61f86 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 
 	data->registry_data.disallowed_features = 0x0;
 	data->registry_data.od_state_in_dc_support = 0;
+	data->registry_data.thermal_support = 1;
 	data->registry_data.skip_baco_hardware = 0;
 
 	data->registry_data.log_avfs_param = 0;
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 13/13] drm/amd/powerplay: cosmetic fix
       [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
                     ` (10 preceding siblings ...)
  2018-06-19  7:39   ` [PATCH 12/13] drm/amd/powerplay: correct vega12 thermal support as true Evan Quan
@ 2018-06-19  7:39   ` Evan Quan
       [not found]     ` <1529393945-16629-13-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
  11 siblings, 1 reply; 30+ messages in thread
From: Evan Quan @ 2018-06-19  7:39 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Evan Quan

Fix coding style and drop unused variable.

Change-Id: I9630f39154ec6bc30115e75924b35bcbe028a1a4
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c     | 10 +++-------
 .../gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h  | 18 +++++++++---------
 2 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index de61f86..a699416 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -811,9 +811,6 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
 			data->smu_features[i].enabled = enabled;
 			data->smu_features[i].supported = enabled;
-			PP_ASSERT(
-				!data->smu_features[i].allowed || enabled,
-				"[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
 		}
 	}
 
@@ -1230,8 +1227,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
 
 	*gfx_freq = 0;
 
-	PP_ASSERT_WITH_CODE(
-			smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+	PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
 			"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
 			return -1);
 	PP_ASSERT_WITH_CODE(
@@ -1790,7 +1787,6 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
-	int result = 0;
 	uint32_t i;
 
 	if (!data->registry_data.disable_water_mark &&
@@ -1841,7 +1837,7 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 		data->water_marks_bitmap &= ~WaterMarksLoaded;
 	}
 
-	return result;
+	return 0;
 }
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index b08526f..b6ffd08 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
   QuadraticInt_t    ReservedEquation2;
   QuadraticInt_t    ReservedEquation3;
 
-	uint16_t     MinVoltageUlvGfx;
-	uint16_t     MinVoltageUlvSoc;
+  uint16_t     MinVoltageUlvGfx;
+  uint16_t     MinVoltageUlvSoc;
 
-	uint32_t     Reserved[14];
+  uint32_t     Reserved[14];
 
 
 
@@ -483,9 +483,9 @@ typedef struct {
   uint8_t      padding8_4;
 
 
-	uint8_t      PllGfxclkSpreadEnabled;
-	uint8_t      PllGfxclkSpreadPercent;
-	uint16_t     PllGfxclkSpreadFreq;
+  uint8_t      PllGfxclkSpreadEnabled;
+  uint8_t      PllGfxclkSpreadPercent;
+  uint16_t     PllGfxclkSpreadFreq;
 
   uint8_t      UclkSpreadEnabled;
   uint8_t      UclkSpreadPercent;
@@ -495,9 +495,9 @@ typedef struct {
   uint8_t      SocclkSpreadPercent;
   uint16_t     SocclkSpreadFreq;
 
-	uint8_t      AcgGfxclkSpreadEnabled;
-	uint8_t      AcgGfxclkSpreadPercent;
-	uint16_t     AcgGfxclkSpreadFreq;
+  uint8_t      AcgGfxclkSpreadEnabled;
+  uint8_t      AcgGfxclkSpreadPercent;
+  uint16_t     AcgGfxclkSpreadFreq;
 
   uint8_t      Vr2_I2C_address;
   uint8_t      padding_vr2[3];
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 02/13] drm/amd/powerplay: smc_dpm_info structure change
       [not found]     ` <1529393945-16629-2-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 14:57       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 14:57 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> A new member Vr2_I2C_address is added.
>
> Change-Id: I9821365721c9d73e1d2df2f65dfa97f39f0425c6
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/include/atomfirmware.h                   | 5 ++++-
>  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c           | 2 ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h           | 2 ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2 ++
>  drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h    | 5 ++++-
>  5 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
> index 092d800..33b4de4 100644
> --- a/drivers/gpu/drm/amd/include/atomfirmware.h
> +++ b/drivers/gpu/drm/amd/include/atomfirmware.h
> @@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
>         uint8_t  acggfxclkspreadpercent;
>         uint16_t acggfxclkspreadfreq;
>
> -       uint32_t boardreserved[10];
> +       uint8_t Vr2_I2C_address;
> +       uint8_t padding_vr2[3];
> +
> +       uint32_t boardreserved[9];
>  };
>
>  /*
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
> index aa2faff..d27c1c9 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
> @@ -699,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
>         param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
>         param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
>
> +       param->Vr2_I2C_address = info->Vr2_I2C_address;
> +
>         return 0;
>  }
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
> index 745bd38..22e2166 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
> @@ -210,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
>         uint8_t  acggfxclkspreadenabled;
>         uint8_t  acggfxclkspreadpercent;
>         uint16_t acggfxclkspreadfreq;
> +
> +       uint8_t Vr2_I2C_address;
>  };
>
>  int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> index 888ddca..2991470 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> @@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
>                 ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
>         }
>
> +       ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
> +
>         return 0;
>  }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> index 2f8a3b9..b08526f 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> @@ -499,7 +499,10 @@ typedef struct {
>         uint8_t      AcgGfxclkSpreadPercent;
>         uint16_t     AcgGfxclkSpreadFreq;
>
> -       uint32_t     BoardReserved[10];
> +  uint8_t      Vr2_I2C_address;
> +  uint8_t      padding_vr2[3];
> +
> +  uint32_t     BoardReserved[9];
>
>
>    uint32_t     MmHubPadding[7];
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 03/13] drm/amd/powerplay: drop the acg fix
       [not found]     ` <1529393945-16629-3-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 14:57       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 14:57 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> This workaround is not needed any more.
>
> Change-Id: I81cb20ecd52d242af26ca32860baacdb5ec126c9
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 6 ------
>  1 file changed, 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> index 2991470..f4f366b 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
> @@ -224,12 +224,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
>         ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
>         ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
>
> -       /* 0xFFFF will disable the ACG feature */
> -       if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
> -               ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
> -               ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
> -       }
> -
>         ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
>
>         return 0;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 04/13] drm/amd/powerplay: revise default dpm tables setup
       [not found]     ` <1529393945-16629-4-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 14:59       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 14:59 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> Initialize the soft/hard min/max level correctly and
> handle the dpm disabled situation.
>
> Change-Id: I9a1d303ee54ac4c9687f72c86097b008ae398c05
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 334 ++++++++-------------
>  1 file changed, 132 insertions(+), 202 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index e81661cc..bc976e1 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -453,37 +453,30 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
>   */
>  static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
>  {
> -       dpm_state->soft_min_level = 0xff;
> -       dpm_state->soft_max_level = 0xff;
> -       dpm_state->hard_min_level = 0xff;
> -       dpm_state->hard_max_level = 0xff;
> +       dpm_state->soft_min_level = 0x0;
> +       dpm_state->soft_max_level = 0xffff;
> +       dpm_state->hard_min_level = 0x0;
> +       dpm_state->hard_max_level = 0xffff;
>  }
>
> -static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
> -               PPCLK_e clkID, uint32_t *num_dpm_level)
> +static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
> +               PPCLK_e clk_id, uint32_t *num_of_levels)
>  {
> -       int result;
> -       /*
> -        * SMU expects the Clock ID to be in the top 16 bits.
> -        * Lower 16 bits specify the level however 0xFF is a
> -        * special argument the returns the total number of levels
> -        */
> -       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
> -               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
> -               "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
> -               return -EINVAL);
> -
> -       result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
> +       int ret = 0;
>
> -       PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
> -               "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
> -               return -EINVAL);
> +       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
> +                       PPSMC_MSG_GetDpmFreqByIndex,
> +                       (clk_id << 16 | 0xFF));
> +       PP_ASSERT_WITH_CODE(!ret,
> +                       "[GetNumOfDpmLevel] failed to get dpm levels!",
> +                       return ret);
>
> -       PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
> -               "[GetNumberDPMLevel] Number of CLK Levels is zero!",
> -               return -EINVAL);
> +       vega12_read_arg_from_smc(hwmgr, num_of_levels);
> +       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
> +                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
> +                       return -EINVAL);
>
> -       return result;
> +       return ret;
>  }
>
>  static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
> @@ -509,6 +502,31 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
>         return result;
>  }
>
> +static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
> +               struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
> +{
> +       int ret = 0;
> +       uint32_t i, num_of_levels, clk;
> +
> +       ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
> +       PP_ASSERT_WITH_CODE(!ret,
> +                       "[SetupSingleDpmTable] failed to get clk levels!",
> +                       return ret);
> +
> +       dpm_table->count = num_of_levels;
> +
> +       for (i = 0; i < num_of_levels; i++) {
> +               ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                       "[SetupSingleDpmTable] failed to get clk of specific level!",
> +                       return ret);
> +               dpm_table->dpm_levels[i].value = clk;
> +               dpm_table->dpm_levels[i].enabled = true;
> +       }
> +
> +       return ret;
> +}
> +
>  /*
>   * This function is to initialize all DPM state tables
>   * for SMU based on the dependency table.
> @@ -519,224 +537,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
>   */
>  static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
>  {
> -       uint32_t num_levels, i, clock;
>
>         struct vega12_hwmgr *data =
>                         (struct vega12_hwmgr *)(hwmgr->backend);
> -
>         struct vega12_single_dpm_table *dpm_table;
> +       int ret = 0;
>
>         memset(&data->dpm_table, 0, sizeof(data->dpm_table));
>
> -       /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
> +       /* socclk */
>         dpm_table = &(data->dpm_table.soc_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_SOCCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* gfxclk */
>         dpm_table = &(data->dpm_table.gfx_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_GFXCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
> -       /* Initialize Mclk DPM table based on allow Mclk values */
> -       dpm_table = &(data->dpm_table.mem_table);
>
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_UCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       /* memclk */
> +       dpm_table = &(data->dpm_table.mem_table);
> +       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* eclk */
>         dpm_table = &(data->dpm_table.eclk_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -               PPCLK_ECLK, i, &clock) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
> -               return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_VCE].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* vclk */
>         dpm_table = &(data->dpm_table.vclk_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_VCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_UVD].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* dclk */
>         dpm_table = &(data->dpm_table.dclk_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
> -               &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_DCLK, i, &clock) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
> -               return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_UVD].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> -       /* Assume there is no headless Vega12 for now */
> +       /* dcefclk */
>         dpm_table = &(data->dpm_table.dcef_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
> -               PPCLK_DCEFCLK, &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_DCEFCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> +       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
> +                               return ret);
> +       } else {
> +               dpm_table->count = 1;
> +               dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
>         }
> -
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* pixclk */
>         dpm_table = &(data->dpm_table.pixel_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
> -               PPCLK_PIXCLK, &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_PIXCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> -       }
> -
> +       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
> +                               return ret);
> +       } else
> +               dpm_table->count = 0;
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* dispclk */
>         dpm_table = &(data->dpm_table.display_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
> -               PPCLK_DISPCLK, &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_DISPCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> -       }
> -
> +       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
> +                               return ret);
> +       } else
> +               dpm_table->count = 0;
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
> +       /* phyclk */
>         dpm_table = &(data->dpm_table.phy_table);
> -
> -       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
> -               PPCLK_PHYCLK, &num_levels) == 0,
> -               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
> -               return -EINVAL);
> -
> -       dpm_table->count = num_levels;
> -
> -       for (i = 0; i < num_levels; i++) {
> -               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
> -                       PPCLK_PHYCLK, i, &clock) == 0,
> -                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
> -                       return -EINVAL);
> -
> -               dpm_table->dpm_levels[i].value = clock;
> -               dpm_table->dpm_levels[i].enabled = true;
> -       }
> -
> +       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
> +               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
> +               PP_ASSERT_WITH_CODE(!ret,
> +                               "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
> +                               return ret);
> +       } else
> +               dpm_table->count = 0;
>         vega12_init_dpm_state(&(dpm_table->dpm_state));
>
>         /* save a copy of the default DPM table */
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 05/13] drm/amd/powerplay: retrieve all clock ranges on startup
       [not found]     ` <1529393945-16629-5-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:03       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:03 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> So that we do not need to use PPSMC_MSG_GetMin/MaxDpmFreq to
> get the clock ranges at runtime, since that causes some problems.
>
> Change-Id: Ia0d6390c976749538b35c8ffde5d1e661b4944c0
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 69 +++++++++++++++++-----
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |  8 +++
>  2 files changed, 61 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index bc976e1..ea530af 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -856,6 +856,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
>         return result;
>  }
>
> +static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
> +               PPCLK_e clkid, struct vega12_clock_range *clock)
> +{
> +       /* AC Max */
> +       PP_ASSERT_WITH_CODE(
> +               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
> +               "[GetClockRanges] Failed to get max ac clock from SMC!",
> +               return -1);


Please use a proper error code here (e.g., -EINVAL) rather than -1.

> +       vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
> +
> +       /* AC Min */
> +       PP_ASSERT_WITH_CODE(
> +               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
> +               "[GetClockRanges] Failed to get min ac clock from SMC!",
> +               return -1);

Same here.

> +       vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
> +
> +       /* DC Max */
> +       PP_ASSERT_WITH_CODE(
> +               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
> +               "[GetClockRanges] Failed to get max dc clock from SMC!",
> +               return -1);

and here.

> +       vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
> +
> +       return 0;
> +}
> +
> +static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data =
> +                       (struct vega12_hwmgr *)(hwmgr->backend);
> +       uint32_t i;
> +
> +       for (i = 0; i < PPCLK_COUNT; i++)
> +               PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
> +                                       i, &(data->clk_range[i])),
> +                               "Failed to get clk range from SMC!",
> +                               return -1);


And here.  With those fixed:
Acked-by: Alex Deucher <alexander.deucher@amd.com>

> +
> +       return 0;
> +}
> +
>  static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
>  {
>         int tmp_result, result = 0;
> @@ -883,6 +925,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
>                         "Failed to power control set level!",
>                         result = tmp_result);
>
> +       result = vega12_get_all_clock_ranges(hwmgr);
> +       PP_ASSERT_WITH_CODE(!result,
> +                       "Failed to get all clock ranges!",
> +                       return result);
> +
>         result = vega12_odn_initialize_default_settings(hwmgr);
>         PP_ASSERT_WITH_CODE(!result,
>                         "Failed to power control set level!",
> @@ -1472,24 +1519,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
>                 PPCLK_e clock_select,
>                 bool max)
>  {
> -       int result;
> -       *clock = 0;
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
>
> -       if (max) {
> -                PP_ASSERT_WITH_CODE(
> -                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
> -                       "[GetClockRanges] Failed to get max clock from SMC!",
> -                       return -1);
> -               result = vega12_read_arg_from_smc(hwmgr, clock);
> -       } else {
> -               PP_ASSERT_WITH_CODE(
> -                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
> -                       "[GetClockRanges] Failed to get min clock from SMC!",
> -                       return -1);
> -               result = vega12_read_arg_from_smc(hwmgr, clock);
> -       }
> +       if (max)
> +               *clock = data->clk_range[clock_select].ACMax;
> +       else
> +               *clock = data->clk_range[clock_select].ACMin;
>
> -       return result;
> +       return 0;
>  }
>
>  static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> index 49b38df..e18c083 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> @@ -304,6 +304,12 @@ struct vega12_odn_fan_table {
>         bool            force_fan_pwm;
>  };
>
> +struct vega12_clock_range {
> +       uint32_t        ACMax;
> +       uint32_t        ACMin;
> +       uint32_t        DCMax;
> +};
> +
>  struct vega12_hwmgr {
>         struct vega12_dpm_table          dpm_table;
>         struct vega12_dpm_table          golden_dpm_table;
> @@ -385,6 +391,8 @@ struct vega12_hwmgr {
>         uint32_t                       smu_version;
>         struct smu_features            smu_features[GNLD_FEATURES_MAX];
>         struct vega12_smc_state_table  smc_state_table;
> +
> +       struct vega12_clock_range      clk_range[PPCLK_COUNT];
>  };
>
>  #define VEGA12_DPM2_NEAR_TDP_DEC                      10
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 06/13] drm/amd/powerplay: revise clock level setup
       [not found]     ` <1529393945-16629-6-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:07       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:07 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> Make sure the clock levels are set only when the corresponding dpm is
> enabled. The uvd/vce/soc clocks are also changed correspondingly.
>
> Change-Id: I1db2e2ac355fd5aea1c0a25c2b140d039a590089
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 318 ++++++++++++++-------
>  1 file changed, 211 insertions(+), 107 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index ea530af..a124b81 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -958,76 +958,172 @@ static uint32_t vega12_find_lowest_dpm_level(
>                         break;
>         }
>
> +       if (i >= table->count) {
> +               i = 0;
> +               table->dpm_levels[i].enabled = true;
> +       }
> +
>         return i;
>  }
>
>  static uint32_t vega12_find_highest_dpm_level(
>                 struct vega12_single_dpm_table *table)
>  {
> -       uint32_t i = 0;
> +       int32_t i = 0;
> +       PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
> +                       "[FindHighestDPMLevel] DPM Table has too many entries!",
> +                       return MAX_REGULAR_DPM_NUMBER - 1);
>
> -       if (table->count <= MAX_REGULAR_DPM_NUMBER) {
> -               for (i = table->count; i > 0; i--) {
> -                       if (table->dpm_levels[i - 1].enabled)
> -                               return i - 1;
> -               }
> -       } else {
> -               pr_info("DPM Table Has Too Many Entries!");
> -               return MAX_REGULAR_DPM_NUMBER - 1;
> +       for (i = table->count - 1; i >= 0; i--) {
> +               if (table->dpm_levels[i].enabled)
> +                       break;
>         }
>
> -       return i;
> +       if (i < 0) {
> +               i = 0;
> +               table->dpm_levels[i].enabled = true;
> +       }
> +
> +       return (uint32_t)i;
>  }
>
>  static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = hwmgr->backend;
> -       if (data->smc_state_table.gfx_boot_level !=
> -                       data->dpm_table.gfx_table.dpm_state.soft_min_level) {
> -               smum_send_msg_to_smc_with_parameter(hwmgr,
> -                       PPSMC_MSG_SetSoftMinByFreq,
> -                       PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
> -               data->dpm_table.gfx_table.dpm_state.soft_min_level =
> -                               data->smc_state_table.gfx_boot_level;
> +       uint32_t min_freq;
> +       int ret = 0;
> +
> +       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
> +               min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min gfxclk !",
> +                                       return ret);
>         }
>
> -       if (data->smc_state_table.mem_boot_level !=
> -                       data->dpm_table.mem_table.dpm_state.soft_min_level) {
> -               smum_send_msg_to_smc_with_parameter(hwmgr,
> -                       PPSMC_MSG_SetSoftMinByFreq,
> -                       PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
> -               data->dpm_table.mem_table.dpm_state.soft_min_level =
> -                               data->smc_state_table.mem_boot_level;
> +       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
> +               min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min memclk !",
> +                                       return ret);
> +
> +               min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetHardMinByFreq,
> +                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set hard min memclk !",
> +                                       return ret);
>         }
>
> -       return 0;
> +       if (data->smu_features[GNLD_DPM_UVD].enabled) {
> +               min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min vclk!",
> +                                       return ret);
> +
> +               min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min dclk!",
> +                                       return ret);
> +       }
> +
> +       if (data->smu_features[GNLD_DPM_VCE].enabled) {
> +               min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min eclk!",
> +                                       return ret);
> +       }
> +
> +       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
> +               min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
> +                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
> +                                       "Failed to set soft min socclk!",
> +                                       return ret);
> +       }
> +
> +       return ret;
>
>  }
>
>  static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = hwmgr->backend;
> -       if (data->smc_state_table.gfx_max_level !=
> -               data->dpm_table.gfx_table.dpm_state.soft_max_level) {
> -               smum_send_msg_to_smc_with_parameter(hwmgr,
> -                       PPSMC_MSG_SetSoftMaxByFreq,
> -                       /* plus the vale by 1 to align the resolution */
> -                       PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
> -               data->dpm_table.gfx_table.dpm_state.soft_max_level =
> -                               data->smc_state_table.gfx_max_level;
> +       uint32_t max_freq;
> +       int ret = 0;
> +
> +       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
> +               max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max gfxclk!",
> +                                       return ret);
>         }
>
> -       if (data->smc_state_table.mem_max_level !=
> -               data->dpm_table.mem_table.dpm_state.soft_max_level) {
> -               smum_send_msg_to_smc_with_parameter(hwmgr,
> -                       PPSMC_MSG_SetSoftMaxByFreq,
> -                       /* plus the vale by 1 to align the resolution */
> -                       PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
> -               data->dpm_table.mem_table.dpm_state.soft_max_level =
> -                               data->smc_state_table.mem_max_level;
> +       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
> +               max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max memclk!",
> +                                       return ret);
>         }
>
> -       return 0;
> +       if (data->smu_features[GNLD_DPM_UVD].enabled) {
> +               max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max vclk!",
> +                                       return ret);
> +
> +               max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max dclk!",
> +                                       return ret);
> +       }
> +
> +       if (data->smu_features[GNLD_DPM_VCE].enabled) {
> +               max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max eclk!",
> +                                       return ret);
> +       }
> +
> +       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
> +               max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
> +
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
> +                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
> +                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
> +                                       "Failed to set soft max socclk!",
> +                                       return ret);
> +       }
> +
> +       return ret;
>  }
>
>  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
> @@ -1330,12 +1426,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
>         struct vega12_hwmgr *data =
>                         (struct vega12_hwmgr *)(hwmgr->backend);
>
> -       data->smc_state_table.gfx_boot_level =
> -       data->smc_state_table.gfx_max_level =
> -                       vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
> -       data->smc_state_table.mem_boot_level =
> -       data->smc_state_table.mem_max_level =
> -                       vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
> +       uint32_t soft_level;
> +
> +       soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
> +
> +       data->dpm_table.gfx_table.dpm_state.soft_min_level =
> +               data->dpm_table.gfx_table.dpm_state.soft_max_level =
> +               data->dpm_table.gfx_table.dpm_levels[soft_level].value;
> +
> +       soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
> +
> +       data->dpm_table.mem_table.dpm_state.soft_min_level =
> +               data->dpm_table.mem_table.dpm_state.soft_max_level =
> +               data->dpm_table.mem_table.dpm_levels[soft_level].value;
>
>         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
>                         "Failed to upload boot level to highest!",
> @@ -1352,13 +1455,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data =
>                         (struct vega12_hwmgr *)(hwmgr->backend);
> +       uint32_t soft_level;
> +
> +       soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
> +
> +       data->dpm_table.gfx_table.dpm_state.soft_min_level =
> +               data->dpm_table.gfx_table.dpm_state.soft_max_level =
> +               data->dpm_table.gfx_table.dpm_levels[soft_level].value;
>
> -       data->smc_state_table.gfx_boot_level =
> -       data->smc_state_table.gfx_max_level =
> -                       vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
> -       data->smc_state_table.mem_boot_level =
> -       data->smc_state_table.mem_max_level =
> -                       vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
> +       soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
> +
> +       data->dpm_table.mem_table.dpm_state.soft_min_level =
> +               data->dpm_table.mem_table.dpm_state.soft_max_level =
> +               data->dpm_table.mem_table.dpm_levels[soft_level].value;
>
>         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
>                         "Failed to upload boot level to highest!",
> @@ -1374,17 +1483,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
>
>  static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
>  {
> -       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> -
> -       data->smc_state_table.gfx_boot_level =
> -                       vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
> -       data->smc_state_table.gfx_max_level =
> -                       vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
> -       data->smc_state_table.mem_boot_level =
> -                       vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
> -       data->smc_state_table.mem_max_level =
> -                       vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
> -
>         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
>                         "Failed to upload DPM Bootup Levels!",
>                         return -1);
> @@ -1392,22 +1490,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
>         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
>                         "Failed to upload DPM Max Levels!",
>                         return -1);
> +
>         return 0;
>  }
>
> -#if 0
>  static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
>                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
>  {
> -       struct phm_ppt_v2_information *table_info =
> -                       (struct phm_ppt_v2_information *)(hwmgr->pptable);
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
> +       struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
> +       struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
> +
> +       *sclk_mask = 0;
> +       *mclk_mask = 0;
> +       *soc_mask  = 0;
>
> -       if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
> -               table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
> -               table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
> +       if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
> +           mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
> +           soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
>                 *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
> -               *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
>                 *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
> +               *soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
>         }
>
>         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> @@ -1415,13 +1519,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
>         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
>                 *mclk_mask = 0;
>         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> -               *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
> -               *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
> -               *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
> +               *sclk_mask = gfx_dpm_table->count - 1;
> +               *mclk_mask = mem_dpm_table->count - 1;
> +               *soc_mask  = soc_dpm_table->count - 1;
>         }
> +
>         return 0;
>  }
> -#endif
>
>  static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
>  {
> @@ -1445,11 +1549,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
>                                 enum amd_dpm_forced_level level)
>  {
>         int ret = 0;
> -#if 0
>         uint32_t sclk_mask = 0;
>         uint32_t mclk_mask = 0;
>         uint32_t soc_mask = 0;
> -#endif
>
>         switch (level) {
>         case AMD_DPM_FORCED_LEVEL_HIGH:
> @@ -1465,27 +1567,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
>         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
>         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
>         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> -#if 0
>                 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
>                 if (ret)
>                         return ret;
> -               vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
> -               vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> -#endif
> +               vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
> +               vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
>                 break;
>         case AMD_DPM_FORCED_LEVEL_MANUAL:
>         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>         default:
>                 break;
>         }
> -#if 0
> -       if (!ret) {
> -               if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> -                       vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
> -               else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> -                       vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
> -       }
> -#endif
> +
>         return ret;
>  }
>
> @@ -1745,37 +1838,48 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
>                 enum pp_clock_type type, uint32_t mask)
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> -
> -       if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> -                               AMD_DPM_FORCED_LEVEL_LOW |
> -                               AMD_DPM_FORCED_LEVEL_HIGH))
> -               return -EINVAL;
> +       uint32_t soft_min_level, soft_max_level;
> +       int ret = 0;
>
>         switch (type) {
>         case PP_SCLK:
> -               data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
> -               data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
> +               soft_min_level = mask ? (ffs(mask) - 1) : 0;
> +               soft_max_level = mask ? (fls(mask) - 1) : 0;
> +
> +               data->dpm_table.gfx_table.dpm_state.soft_min_level =
> +                       data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
> +               data->dpm_table.gfx_table.dpm_state.soft_max_level =
> +                       data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
>
> -               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
> +               ret = vega12_upload_dpm_min_level(hwmgr);
> +               PP_ASSERT_WITH_CODE(!ret,
>                         "Failed to upload boot level to lowest!",
> -                       return -EINVAL);
> +                       return ret);
>
> -               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
> +               ret = vega12_upload_dpm_max_level(hwmgr);
> +               PP_ASSERT_WITH_CODE(!ret,
>                         "Failed to upload dpm max level to highest!",
> -                       return -EINVAL);
> +                       return ret);
>                 break;
>
>         case PP_MCLK:
> -               data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
> -               data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
> +               soft_min_level = mask ? (ffs(mask) - 1) : 0;
> +               soft_max_level = mask ? (fls(mask) - 1) : 0;
> +
> +               data->dpm_table.mem_table.dpm_state.soft_min_level =
> +                       data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
> +               data->dpm_table.mem_table.dpm_state.soft_max_level =
> +                       data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
>
> -               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
> +               ret = vega12_upload_dpm_min_level(hwmgr);
> +               PP_ASSERT_WITH_CODE(!ret,
>                         "Failed to upload boot level to lowest!",
> -                       return -EINVAL);
> +                       return ret);
>
> -               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
> +               ret = vega12_upload_dpm_max_level(hwmgr);
> +               PP_ASSERT_WITH_CODE(!ret,
>                         "Failed to upload dpm max level to highest!",
> -                       return -EINVAL);
> +                       return ret);
>
>                 break;
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status
       [not found]     ` <1529393945-16629-7-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:10       ` Alex Deucher
       [not found]         ` <CADnq5_NSa8Bh_ffmz12Zu2XH4b3GawMdz_6vTF5NbFp_pLaLYA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:10 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> On UVD/VCE dpm disabled, the powergate status should be
> set as true.

Can you explain this patch a bit?  Why is power gate state set to true
when dpm is disabled?

Alex

>
> Change-Id: I569a5aa216b5e7d64a2b504f2ff98cc83ca802d5
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17 +++++++++++++++++
>  1 file changed, 17 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index a124b81..cb0589e 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -777,6 +777,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
>         return 0;
>  }
>
> +static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data =
> +                       (struct vega12_hwmgr *)(hwmgr->backend);
> +
> +       data->uvd_power_gated = true;
> +       data->vce_power_gated = true;
> +
> +       if (data->smu_features[GNLD_DPM_UVD].enabled)
> +               data->uvd_power_gated = false;
> +
> +       if (data->smu_features[GNLD_DPM_VCE].enabled)
> +               data->vce_power_gated = false;
> +}
> +
>  static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data =
> @@ -801,6 +816,8 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
>                 }
>         }
>
> +       vega12_init_powergate_state(hwmgr);
> +
>         return 0;
>  }
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 08/13] drm/amd/powerplay: correct smc display config setting
       [not found]     ` <1529393945-16629-8-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:13       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:13 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> Multi monitor situation should be taken into consideration.
> Also, there is no need to setup UCLK hard min clock level.

This looks like it should be two patches since there are two distinct
changes.  Also please extend the commit messages a bit (e.g., "need to
take into account multi-head with synced displays" and "we don't need
to set a uclk hard min because...").  With that fixed:
Acked-by: Alex Deucher <alexander.deucher@amd.com>

>
> Change-Id: Icf1bc9b420a40433338d9071e386308d30999491
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 13 ++-----------
>  1 file changed, 2 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index cb0589e..4732179 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1399,9 +1399,9 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
>                         (struct vega12_hwmgr *)(hwmgr->backend);
>         struct PP_Clocks min_clocks = {0};
>         struct pp_display_clock_request clock_req;
> -       uint32_t clk_request;
>
> -       if (hwmgr->display_config->num_display > 1)
> +       if ((hwmgr->display_config->num_display > 1) &&
> +               !hwmgr->display_config->multi_monitor_in_sync)
>                 vega12_notify_smc_display_change(hwmgr, false);
>         else
>                 vega12_notify_smc_display_change(hwmgr, true);
> @@ -1426,15 +1426,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
>                 }
>         }
>
> -       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
> -               clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
> -               PP_ASSERT_WITH_CODE(
> -                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
> -                       "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
> -                       return -1);
> -               data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
> -       }
> -
>         return 0;
>  }
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 09/13] drm/amd/powerplay: correct vega12 max num of dpm level
       [not found]     ` <1529393945-16629-9-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:13       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:13 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> Use MAX_NUM_CLOCKS instead of VG12_PSUEDO* macros for
> the max number of dpm levels.
>
> Change-Id: Ida49f51777663a8d68d05ddcd41f4df0d8e61481
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 4732179..a227ace 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1642,8 +1642,8 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
>                 return -1;
>
>         dpm_table = &(data->dpm_table.gfx_table);
> -       ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
> -               VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
> +       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
> +               MAX_NUM_CLOCKS : dpm_table->count;
>
>         for (i = 0; i < ucount; i++) {
>                 clocks->data[i].clocks_in_khz =
> @@ -1674,11 +1674,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
>                 return -1;
>
>         dpm_table = &(data->dpm_table.mem_table);
> -       ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
> -               VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
> +       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
> +               MAX_NUM_CLOCKS : dpm_table->count;
>
>         for (i = 0; i < ucount; i++) {
>                 clocks->data[i].clocks_in_khz =
> +                       data->mclk_latency_table.entries[i].frequency =
>                         dpm_table->dpm_levels[i].value * 100;
>
>                 clocks->data[i].latency_in_us =
> @@ -1704,8 +1705,8 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
>
>
>         dpm_table = &(data->dpm_table.dcef_table);
> -       ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
> -               VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
> +       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
> +               MAX_NUM_CLOCKS : dpm_table->count;
>
>         for (i = 0; i < ucount; i++) {
>                 clocks->data[i].clocks_in_khz =
> @@ -1732,8 +1733,8 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
>
>
>         dpm_table = &(data->dpm_table.soc_table);
> -       ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
> -               VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
> +       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
> +               MAX_NUM_CLOCKS : dpm_table->count;
>
>         for (i = 0; i < ucount; i++) {
>                 clocks->data[i].clocks_in_khz =
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found]     ` <1529393945-16629-10-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:16       ` Alex Deucher
       [not found]         ` <CADnq5_PqQ0aRcA=8iUq9qLow+W6rwpXhsj2GKRskn3xjwEbicw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:16 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> The clocks hard/soft min/max clock levels will be adjusted
> correspondingly.


Also note that this adds the apply_clocks_adjust_rules callback which
is used to validate the clock settings on a power state change.  One
other comment below.

>
> Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
>  2 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index a227ace..26bdfff 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
>         return size;
>  }
>
> +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       struct vega12_single_dpm_table *dpm_table;
> +       bool vblank_too_short = false;
> +       bool disable_mclk_switching;
> +       uint32_t i, latency;
> +
> +       disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
> +                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> +                                 vblank_too_short;
> +       latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> +
> +       /* gfxclk */
> +       dpm_table = &(data->dpm_table.gfx_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* memclk */
> +       dpm_table = &(data->dpm_table.mem_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* honour DAL's UCLK Hardmin */
> +       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
> +               dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
> +

Didn't you just remove the uclk hard min setting in a previous patch?



> +       /* Hardmin is dependent on displayconfig */
> +       if (disable_mclk_switching) {
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
> +                       if (data->mclk_latency_table.entries[i].latency <= latency) {
> +                               if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
> +                                       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
> +                                       break;
> +                               }
> +                       }
> +               }
> +       }
> +
> +       if (hwmgr->display_config->nb_pstate_switch_disable)
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       /* vclk */
> +       dpm_table = &(data->dpm_table.vclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* dclk */
> +       dpm_table = &(data->dpm_table.dclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* socclk */
> +       dpm_table = &(data->dpm_table.soc_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* eclk */
> +       dpm_table = &(data->dpm_table.eclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
>  static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> @@ -2196,6 +2356,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .display_clock_voltage_request = vega12_display_clock_voltage_request,
>         .force_clock_level = vega12_force_clock_level,
>         .print_clock_levels = vega12_print_clock_levels,
> +       .apply_clocks_adjust_rules =
> +               vega12_apply_clocks_adjust_rules,
>         .display_config_changed = vega12_display_configuration_changed_task,
>         .powergate_uvd = vega12_power_gate_uvd,
>         .powergate_vce = vega12_power_gate_vce,
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> index e18c083..e17237c 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> @@ -443,6 +443,8 @@ struct vega12_hwmgr {
>  #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
> +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
> +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
>
>  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 11/13] drm/amd/powerplay: set vega12 pre display configurations
       [not found]     ` <1529393945-16629-11-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:18       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:18 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> PPSMC_MSG_NumOfDisplays is set as 0 and uclk is forced as
> highest.

Adjust the commit message to make it clear that you set num_displays
to 0 and force uclk high as part of the mode set sequence.

With that fixed:
Acked-by: Alex Deucher <alexander.deucher@amd.com>

>
> Change-Id: I2400279d3c979d99f4dd4b8d53f051cd8f8e0c33
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 41 ++++++++++++++++++++++
>  1 file changed, 41 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 26bdfff..1fadb71 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -2110,6 +2110,45 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
>         return 0;
>  }
>
> +static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
> +               struct vega12_single_dpm_table *dpm_table)
> +{
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       int ret = 0;
> +
> +       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
> +               PP_ASSERT_WITH_CODE(dpm_table->count > 0,
> +                               "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
> +                               return -EINVAL);
> +               PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
> +                               "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
> +                               return -EINVAL);
> +
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
> +                               PPSMC_MSG_SetHardMinByFreq,
> +                               (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
> +                               "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
> +                               return ret);
> +       }
> +
> +       return ret;
> +}
> +
> +static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       int ret = 0;
> +
> +       smum_send_msg_to_smc_with_parameter(hwmgr,
> +                       PPSMC_MSG_NumOfDisplays, 0);
> +
> +       ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
> +                       &data->dpm_table.mem_table);
> +
> +       return ret;
> +}
> +
>  static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> @@ -2358,6 +2397,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .print_clock_levels = vega12_print_clock_levels,
>         .apply_clocks_adjust_rules =
>                 vega12_apply_clocks_adjust_rules,
> +       .pre_display_config_changed =
> +               vega12_pre_display_configuration_changed_task,
>         .display_config_changed = vega12_display_configuration_changed_task,
>         .powergate_uvd = vega12_power_gate_uvd,
>         .powergate_vce = vega12_power_gate_vce,
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 12/13] drm/amd/powerplay: correct vega12 thermal support as true
       [not found]     ` <1529393945-16629-12-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:18       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:18 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> Thermal support is enabled on vega12.
>
> Change-Id: I7069a65c6b289dbfe4a12f81ff96e943e878e6fa
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 1fadb71..de61f86 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
>
>         data->registry_data.disallowed_features = 0x0;
>         data->registry_data.od_state_in_dc_support = 0;
> +       data->registry_data.thermal_support = 1;
>         data->registry_data.skip_baco_hardware = 0;
>
>         data->registry_data.log_avfs_param = 0;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 13/13] drm/amd/powerplay: cosmetic fix
       [not found]     ` <1529393945-16629-13-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
@ 2018-06-19 15:19       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2018-06-19 15:19 UTC (permalink / raw)
  To: Evan Quan; +Cc: amd-gfx list

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> Fix coding style and drop unused variable.
>
> Change-Id: I9630f39154ec6bc30115e75924b35bcbe028a1a4
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c     | 10 +++-------
>  .../gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h  | 18 +++++++++---------
>  2 files changed, 12 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index de61f86..a699416 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -811,9 +811,6 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
>                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
>                         data->smu_features[i].enabled = enabled;
>                         data->smu_features[i].supported = enabled;
> -                       PP_ASSERT(
> -                               !data->smu_features[i].allowed || enabled,
> -                               "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
>                 }
>         }
>
> @@ -1230,8 +1227,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
>
>         *gfx_freq = 0;
>
> -       PP_ASSERT_WITH_CODE(
> -                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
> +       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
> +                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
>                         "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
>                         return -1);
>         PP_ASSERT_WITH_CODE(
> @@ -1790,7 +1787,6 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
>         Watermarks_t *table = &(data->smc_state_table.water_marks_table);
> -       int result = 0;
>         uint32_t i;
>
>         if (!data->registry_data.disable_water_mark &&
> @@ -1841,7 +1837,7 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
>                 data->water_marks_bitmap &= ~WaterMarksLoaded;
>         }
>
> -       return result;
> +       return 0;
>  }
>
>  static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> index b08526f..b6ffd08 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
> @@ -412,10 +412,10 @@ typedef struct {
>    QuadraticInt_t    ReservedEquation2;
>    QuadraticInt_t    ReservedEquation3;
>
> -       uint16_t     MinVoltageUlvGfx;
> -       uint16_t     MinVoltageUlvSoc;
> +  uint16_t     MinVoltageUlvGfx;
> +  uint16_t     MinVoltageUlvSoc;
>
> -       uint32_t     Reserved[14];
> +  uint32_t     Reserved[14];
>
>
>
> @@ -483,9 +483,9 @@ typedef struct {
>    uint8_t      padding8_4;
>
>
> -       uint8_t      PllGfxclkSpreadEnabled;
> -       uint8_t      PllGfxclkSpreadPercent;
> -       uint16_t     PllGfxclkSpreadFreq;
> +  uint8_t      PllGfxclkSpreadEnabled;
> +  uint8_t      PllGfxclkSpreadPercent;
> +  uint16_t     PllGfxclkSpreadFreq;
>
>    uint8_t      UclkSpreadEnabled;
>    uint8_t      UclkSpreadPercent;
> @@ -495,9 +495,9 @@ typedef struct {
>    uint8_t      SocclkSpreadPercent;
>    uint16_t     SocclkSpreadFreq;
>
> -       uint8_t      AcgGfxclkSpreadEnabled;
> -       uint8_t      AcgGfxclkSpreadPercent;
> -       uint16_t     AcgGfxclkSpreadFreq;
> +  uint8_t      AcgGfxclkSpreadEnabled;
> +  uint8_t      AcgGfxclkSpreadPercent;
> +  uint16_t     AcgGfxclkSpreadFreq;
>
>    uint8_t      Vr2_I2C_address;
>    uint8_t      padding_vr2[3];
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found]         ` <CADnq5_PqQ0aRcA=8iUq9qLow+W6rwpXhsj2GKRskn3xjwEbicw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-06-19 15:43           ` Zhu, Rex
       [not found]             ` <CY4PR12MB1687A705AE09BB6492A5C156FB700-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2018-06-20  6:17           ` Quan, Evan
  1 sibling, 1 reply; 30+ messages in thread
From: Zhu, Rex @ 2018-06-19 15:43 UTC (permalink / raw)
  To: Quan, Evan, Alex Deucher; +Cc: amd-gfx list


[-- Attachment #1.1: Type: text/plain, Size: 13243 bytes --]

Hi Evan,

Did we need to check the following flags on vega12? Will the driver set those flags when the user selects the umd_pstate?

PHM_PlatformCaps_UMDPState/PHM_PlatformCaps_PState.

Best Regards
Rex


Get Outlook for Android<https://aka.ms/ghei36>

________________________________
From: amd-gfx <amd-gfx-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org> on behalf of Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Sent: Tuesday, June 19, 2018 11:16:44 PM
To: Quan, Evan
Cc: amd-gfx list
Subject: Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan-5C7GfCeVMHo@public.gmane.org> wrote:
> The clocks hard/soft min/max clock levels will be adjusted
> correspondingly.


Also note that this add the apply_clocks_adjust_rules callback which
is used to validate the clock settings on a power state change.  One
other comment below.

>
> Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
> Signed-off-by: Evan Quan <evan.quan-5C7GfCeVMHo@public.gmane.org>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
>  2 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index a227ace..26bdfff 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
>         return size;
>  }
>
> +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       struct vega12_single_dpm_table *dpm_table;
> +       bool vblank_too_short = false;
> +       bool disable_mclk_switching;
> +       uint32_t i, latency;
> +
> +       disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
> +                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> +                                 vblank_too_short;
> +       latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> +
> +       /* gfxclk */
> +       dpm_table = &(data->dpm_table.gfx_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* memclk */
> +       dpm_table = &(data->dpm_table.mem_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* honour DAL's UCLK Hardmin */
> +       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
> +               dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
> +

Didn't you just remove the uclk hard min setting in a previous patch?



> +       /* Hardmin is dependent on displayconfig */
> +       if (disable_mclk_switching) {
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
> +                       if (data->mclk_latency_table.entries[i].latency <= latency) {
> +                               if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
> +                                       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
> +                                       break;
> +                               }
> +                       }
> +               }
> +       }
> +
> +       if (hwmgr->display_config->nb_pstate_switch_disable)
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       /* vclk */
> +       dpm_table = &(data->dpm_table.vclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* dclk */
> +       dpm_table = &(data->dpm_table.dclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* socclk */
> +       dpm_table = &(data->dpm_table.soc_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* eclk */
> +       dpm_table = &(data->dpm_table.eclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
>  static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> @@ -2196,6 +2356,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .display_clock_voltage_request = vega12_display_clock_voltage_request,
>         .force_clock_level = vega12_force_clock_level,
>         .print_clock_levels = vega12_print_clock_levels,
> +       .apply_clocks_adjust_rules =
> +               vega12_apply_clocks_adjust_rules,
>         .display_config_changed = vega12_display_configuration_changed_task,
>         .powergate_uvd = vega12_power_gate_uvd,
>         .powergate_vce = vega12_power_gate_vce,
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> index e18c083..e17237c 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> @@ -443,6 +443,8 @@ struct vega12_hwmgr {
>  #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
> +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
> +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
>
>  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[-- Attachment #1.2: Type: text/html, Size: 27035 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found]         ` <CADnq5_PqQ0aRcA=8iUq9qLow+W6rwpXhsj2GKRskn3xjwEbicw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2018-06-19 15:43           ` Zhu, Rex
@ 2018-06-20  6:17           ` Quan, Evan
       [not found]             ` <SN6PR12MB26567D8241FEECAF286CEBA8E4770-kxOKjb6HO/FeL/N0e1LXkAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  1 sibling, 1 reply; 30+ messages in thread
From: Quan, Evan @ 2018-06-20  6:17 UTC (permalink / raw)
  To: Alex Deucher; +Cc: amd-gfx list

Hi Alex,

Comment inline.

> -----Original Message-----
> From: Alex Deucher [mailto:alexdeucher@gmail.com]
> Sent: Tuesday, June 19, 2018 11:17 PM
> To: Quan, Evan <Evan.Quan@amd.com>
> Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>
> Subject: Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on
> power state change
> 
> On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com> wrote:
> > The clocks hard/soft min/max clock levels will be adjusted
> > correspondingly.
> 
> 
> Also note that this add the apply_clocks_adjust_rules callback which is used
> to validate the clock settings on a power state change.  One other comment
> below.
Yes, this is for the apply_clocks_adjust_rules callback. I updated the patch description as below

drm/amd/powerplay: apply clocks adjust rules on power state change

This add the apply_clocks_adjust_rules callback which is used
to validate the clock settings on a power state change.
> >
> > Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > ---
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162
> +++++++++++++++++++++
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
> >  2 files changed, 164 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > index a227ace..26bdfff 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct
> pp_hwmgr *hwmgr,
> >         return size;
> >  }
> >
> > +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) {
> > +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr-
> >backend);
> > +       struct vega12_single_dpm_table *dpm_table;
> > +       bool vblank_too_short = false;
> > +       bool disable_mclk_switching;
> > +       uint32_t i, latency;
> > +
> > +       disable_mclk_switching = ((1 < hwmgr->display_config->num_display)
> &&
> > +                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> > +                                 vblank_too_short;
> > +       latency =
> > + hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> > +
> > +       /* gfxclk */
> > +       dpm_table = &(data->dpm_table.gfx_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[0].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* memclk */
> > +       dpm_table = &(data->dpm_table.mem_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[0].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* honour DAL's UCLK Hardmin */
> > +       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config-
> >min_mem_set_clock / 100))
> > +               dpm_table->dpm_state.hard_min_level =
> > + hwmgr->display_config->min_mem_set_clock / 100;
> > +
> 
> Didn't you just remove the uclk hard min setting in a previous patch?
> 
Yes, it was moved here together with other clocks' min settings.
> 
> > +       /* Hardmin is dependent on displayconfig */
> > +       if (disable_mclk_switching) {
> > +               dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
> > +                       if (data->mclk_latency_table.entries[i].latency <= latency) {
> > +                               if (dpm_table->dpm_levels[i].value >= (hwmgr-
> >display_config->min_mem_set_clock / 100)) {
> > +                                       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[i].value;
> > +                                       break;
> > +                               }
> > +                       }
> > +               }
> > +       }
> > +
> > +       if (hwmgr->display_config->nb_pstate_switch_disable)
> > +               dpm_table->dpm_state.hard_min_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       /* vclk */
> > +       dpm_table = &(data->dpm_table.vclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* dclk */
> > +       dpm_table = &(data->dpm_table.dclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* socclk */
> > +       dpm_table = &(data->dpm_table.soc_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* eclk */
> > +       dpm_table = &(data->dpm_table.eclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count)
> {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> >  static int vega12_display_configuration_changed_task(struct pp_hwmgr
> > *hwmgr)  {
> >         struct vega12_hwmgr *data = (struct vega12_hwmgr
> > *)(hwmgr->backend); @@ -2196,6 +2356,8 @@ static const struct
> pp_hwmgr_func vega12_hwmgr_funcs = {
> >         .display_clock_voltage_request =
> vega12_display_clock_voltage_request,
> >         .force_clock_level = vega12_force_clock_level,
> >         .print_clock_levels = vega12_print_clock_levels,
> > +       .apply_clocks_adjust_rules =
> > +               vega12_apply_clocks_adjust_rules,
> >         .display_config_changed =
> vega12_display_configuration_changed_task,
> >         .powergate_uvd = vega12_power_gate_uvd,
> >         .powergate_vce = vega12_power_gate_vce, diff --git
> > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > index e18c083..e17237c 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > @@ -443,6 +443,8 @@ struct vega12_hwmgr {
> >  #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
> >  #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
> >  #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
> > +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
> > +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
> >
> >  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool
> > enable);
> >
> > --
> > 2.7.4
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found]             ` <CY4PR12MB1687A705AE09BB6492A5C156FB700-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-06-20  6:22               ` Quan, Evan
  0 siblings, 0 replies; 30+ messages in thread
From: Quan, Evan @ 2018-06-20  6:22 UTC (permalink / raw)
  To: Zhu, Rex, Alex Deucher; +Cc: amd-gfx list


[-- Attachment #1.1: Type: text/plain, Size: 13744 bytes --]

Hi Rex,

Yes, as discussed, we may need another patch to consider how to set the PHM_PlatformCaps_UMDPState flag.
But for now, I will keep the patch as it is.

Regards,
Evan
From: Zhu, Rex
Sent: Tuesday, June 19, 2018 11:43 PM
To: Quan, Evan <Evan.Quan@amd.com>; Alex Deucher <alexdeucher@gmail.com>
Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>
Subject: Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change

Hi Evan,
Do we need to check the following flags on vega12? Will the driver set those flags when the user selects the umd_pstate?
PHM_PlatformCaps_UMDPState/PHM_PlatformCaps_PState.
Best Regards
Rex

获取 Outlook for Android<https://aka.ms/ghei36>

________________________________
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org<mailto:amd-gfx-bounces@lists.freedesktop.org>> on behalf of Alex Deucher <alexdeucher@gmail.com<mailto:alexdeucher@gmail.com>>
Sent: Tuesday, June 19, 2018 11:16:44 PM
To: Quan, Evan
Cc: amd-gfx list
Subject: Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change

On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan@amd.com<mailto:evan.quan@amd.com>> wrote:
> The clocks hard/soft min/max clock levels will be adjusted
> correspondingly.


Also note that this add the apply_clocks_adjust_rules callback which
is used to validate the clock settings on a power state change.  One
other comment below.

>
> Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
> Signed-off-by: Evan Quan <evan.quan@amd.com<mailto:evan.quan@amd.com>>
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
>  2 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index a227ace..26bdfff 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
>         return size;
>  }
>
> +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
> +{
> +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> +       struct vega12_single_dpm_table *dpm_table;
> +       bool vblank_too_short = false;
> +       bool disable_mclk_switching;
> +       uint32_t i, latency;
> +
> +       disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
> +                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> +                                 vblank_too_short;
> +       latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> +
> +       /* gfxclk */
> +       dpm_table = &(data->dpm_table.gfx_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* memclk */
> +       dpm_table = &(data->dpm_table.mem_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* honour DAL's UCLK Hardmin */
> +       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
> +               dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
> +

Didn't you just remove the uclk hard min setting in a previous patch?



> +       /* Hardmin is dependent on displayconfig */
> +       if (disable_mclk_switching) {
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
> +                       if (data->mclk_latency_table.entries[i].latency <= latency) {
> +                               if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
> +                                       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
> +                                       break;
> +                               }
> +                       }
> +               }
> +       }
> +
> +       if (hwmgr->display_config->nb_pstate_switch_disable)
> +               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       /* vclk */
> +       dpm_table = &(data->dpm_table.vclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* dclk */
> +       dpm_table = &(data->dpm_table.dclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* socclk */
> +       dpm_table = &(data->dpm_table.soc_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       /* eclk */
> +       dpm_table = &(data->dpm_table.eclk_table);
> +       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
> +       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +
> +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> +               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> +               }
> +
> +               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> +                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
>  static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
>  {
>         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
> @@ -2196,6 +2356,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .display_clock_voltage_request = vega12_display_clock_voltage_request,
>         .force_clock_level = vega12_force_clock_level,
>         .print_clock_levels = vega12_print_clock_levels,
> +       .apply_clocks_adjust_rules =
> +               vega12_apply_clocks_adjust_rules,
>         .display_config_changed = vega12_display_configuration_changed_task,
>         .powergate_uvd = vega12_power_gate_uvd,
>         .powergate_vce = vega12_power_gate_vce,
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> index e18c083..e17237c 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> @@ -443,6 +443,8 @@ struct vega12_hwmgr {
>  #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
>  #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
> +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
> +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
>
>  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org>
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org>
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[-- Attachment #1.2: Type: text/html, Size: 31307 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status
       [not found]         ` <CADnq5_NSa8Bh_ffmz12Zu2XH4b3GawMdz_6vTF5NbFp_pLaLYA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-06-20  6:34           ` Quan, Evan
  0 siblings, 0 replies; 30+ messages in thread
From: Quan, Evan @ 2018-06-20  6:34 UTC (permalink / raw)
  To: Alex Deucher; +Cc: amd-gfx list

Hi Alex,

Just sent out a v2 version for this patch.

When UVD/VCE dpm is disabled, the powergate status will be set as true,
so that we will not try to gate them (disable their dpm again).

Regards,
Evan
> -----Original Message-----
> From: Alex Deucher [mailto:alexdeucher@gmail.com]
> Sent: Tuesday, June 19, 2018 11:10 PM
> To: Quan, Evan <Evan.Quan@amd.com>
> Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>
> Subject: Re: [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce
> powergate status
> 
> On Tue, Jun 19, 2018 at 3:38 AM, Evan Quan <evan.quan@amd.com> wrote:
> > On UVD/VCE dpm disabled, the powergate status should be set as true.
> 
> Can you explain this patch a bit?  Why is power gate state set to true when
> dpm is disabled?
> 
> Alex
> 
> >
> > Change-Id: I569a5aa216b5e7d64a2b504f2ff98cc83ca802d5
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > ---
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17
> > +++++++++++++++++
> >  1 file changed, 17 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > index a124b81..cb0589e 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -777,6 +777,21 @@ static int
> vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
> >         return 0;
> >  }
> >
> > +static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr) {
> > +       struct vega12_hwmgr *data =
> > +                       (struct vega12_hwmgr *)(hwmgr->backend);
> > +
> > +       data->uvd_power_gated = true;
> > +       data->vce_power_gated = true;
> > +
> > +       if (data->smu_features[GNLD_DPM_UVD].enabled)
> > +               data->uvd_power_gated = false;
> > +
> > +       if (data->smu_features[GNLD_DPM_VCE].enabled)
> > +               data->vce_power_gated = false; }
> > +
> >  static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)  {
> >         struct vega12_hwmgr *data =
> > @@ -801,6 +816,8 @@ static int vega12_enable_all_smu_features(struct
> pp_hwmgr *hwmgr)
> >                 }
> >         }
> >
> > +       vega12_init_powergate_state(hwmgr);
> > +
> >         return 0;
> >  }
> >
> > --
> > 2.7.4
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change
       [not found]             ` <SN6PR12MB26567D8241FEECAF286CEBA8E4770-kxOKjb6HO/FeL/N0e1LXkAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-06-20 14:05               ` Deucher, Alexander
  0 siblings, 0 replies; 30+ messages in thread
From: Deucher, Alexander @ 2018-06-20 14:05 UTC (permalink / raw)
  To: Quan, Evan, Alex Deucher; +Cc: amd-gfx list


[-- Attachment #1.1: Type: text/plain, Size: 14628 bytes --]

With your proposed changes patch is:

Acked-by: Alex Deucher <alexander.deucher-5C7GfCeVMHo@public.gmane.org>

________________________________
From: amd-gfx <amd-gfx-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org> on behalf of Quan, Evan <Evan.Quan-5C7GfCeVMHo@public.gmane.org>
Sent: Wednesday, June 20, 2018 2:17:57 AM
To: Alex Deucher
Cc: amd-gfx list
Subject: RE: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change

Hi Alex,

Comment inline.

> -----Original Message-----
> From: Alex Deucher [mailto:alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org]
> Sent: Tuesday, June 19, 2018 11:17 PM
> To: Quan, Evan <Evan.Quan-5C7GfCeVMHo@public.gmane.org>
> Cc: amd-gfx list <amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>
> Subject: Re: [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on
> power state change
>
> On Tue, Jun 19, 2018 at 3:39 AM, Evan Quan <evan.quan-5C7GfCeVMHo@public.gmane.org> wrote:
> > The clocks hard/soft min/max clock levels will be adjusted
> > correspondingly.
>
>
> Also note that this add the apply_clocks_adjust_rules callback which is used
> to validate the clock settings on a power state change.  One other comment
> below.
Yes, this is for the apply_clocks_adjust_rules callback. I updated the patch description as below

drm/amd/powerplay: apply clocks adjust rules on power state change

This add the apply_clocks_adjust_rules callback which is used
to validate the clock settings on a power state change.
> >
> > Change-Id: I2c4b6cd6756d40a28933f0c26b9e1a3d5078bab8
> > Signed-off-by: Evan Quan <evan.quan-5C7GfCeVMHo@public.gmane.org>
> > ---
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162
> +++++++++++++++++++++
> >  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h |   2 +
> >  2 files changed, 164 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > index a227ace..26bdfff 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -1950,6 +1950,166 @@ static int vega12_print_clock_levels(struct
> pp_hwmgr *hwmgr,
> >         return size;
> >  }
> >
> > +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) {
> > +       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr-
> >backend);
> > +       struct vega12_single_dpm_table *dpm_table;
> > +       bool vblank_too_short = false;
> > +       bool disable_mclk_switching;
> > +       uint32_t i, latency;
> > +
> > +       disable_mclk_switching = ((1 < hwmgr->display_config->num_display)
> &&
> > +                                 !hwmgr->display_config->multi_monitor_in_sync) ||
> > +                                 vblank_too_short;
> > +       latency =
> > + hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> > +
> > +       /* gfxclk */
> > +       dpm_table = &(data->dpm_table.gfx_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[0].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* memclk */
> > +       dpm_table = &(data->dpm_table.mem_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[0].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* honour DAL's UCLK Hardmin */
> > +       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config-
> >min_mem_set_clock / 100))
> > +               dpm_table->dpm_state.hard_min_level =
> > + hwmgr->display_config->min_mem_set_clock / 100;
> > +
>
> Didn't you just remove the uclk hard min setting in a previous patch?
>
Yes, it was moved here together with other clocks' min settings.
>
> > +       /* Hardmin is dependent on displayconfig */
> > +       if (disable_mclk_switching) {
> > +               dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
> > +                       if (data->mclk_latency_table.entries[i].latency <= latency) {
> > +                               if (dpm_table->dpm_levels[i].value >= (hwmgr-
> >display_config->min_mem_set_clock / 100)) {
> > +                                       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[i].value;
> > +                                       break;
> > +                               }
> > +                       }
> > +               }
> > +       }
> > +
> > +       if (hwmgr->display_config->nb_pstate_switch_disable)
> > +               dpm_table->dpm_state.hard_min_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       /* vclk */
> > +       dpm_table = &(data->dpm_table.vclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* dclk */
> > +       dpm_table = &(data->dpm_table.dclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* socclk */
> > +       dpm_table = &(data->dpm_table.soc_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       /* eclk */
> > +       dpm_table = &(data->dpm_table.eclk_table);
> > +       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +       dpm_table->dpm_state.hard_min_level = dpm_table-
> >dpm_levels[0].value;
> > +       dpm_table->dpm_state.hard_max_level =
> > + dpm_table->dpm_levels[dpm_table->count - 1].value;
> > +
> > +       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
> > +               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count)
> {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
> > +               }
> > +
> > +               if (hwmgr->dpm_level ==
> AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
> > +                       dpm_table->dpm_state.soft_min_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +                       dpm_table->dpm_state.soft_max_level = dpm_table-
> >dpm_levels[dpm_table->count - 1].value;
> > +               }
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> >  static int vega12_display_configuration_changed_task(struct pp_hwmgr
> > *hwmgr)  {
> >         struct vega12_hwmgr *data = (struct vega12_hwmgr
> > *)(hwmgr->backend); @@ -2196,6 +2356,8 @@ static const struct
> pp_hwmgr_func vega12_hwmgr_funcs = {
> >         .display_clock_voltage_request =
> vega12_display_clock_voltage_request,
> >         .force_clock_level = vega12_force_clock_level,
> >         .print_clock_levels = vega12_print_clock_levels,
> > +       .apply_clocks_adjust_rules =
> > +               vega12_apply_clocks_adjust_rules,
> >         .display_config_changed =
> vega12_display_configuration_changed_task,
> >         .powergate_uvd = vega12_power_gate_uvd,
> >         .powergate_vce = vega12_power_gate_vce, diff --git
> > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > index e18c083..e17237c 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> > @@ -443,6 +443,8 @@ struct vega12_hwmgr {
> >  #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
> >  #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
> >  #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
> > +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
> > +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
> >
> >  int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool
> > enable);
> >
> > --
> > 2.7.4
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[-- Attachment #1.2: Type: text/html, Size: 29094 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2018-06-20 14:05 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-06-19  7:38 [PATCH 01/13] drm/amd/powerplay: correct vega12 bootup values settings Evan Quan
     [not found] ` <1529393945-16629-1-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19  7:38   ` [PATCH 02/13] drm/amd/powerplay: smc_dpm_info structure change Evan Quan
     [not found]     ` <1529393945-16629-2-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 14:57       ` Alex Deucher
2018-06-19  7:38   ` [PATCH 03/13] drm/amd/powerplay: drop the acg fix Evan Quan
     [not found]     ` <1529393945-16629-3-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 14:57       ` Alex Deucher
2018-06-19  7:38   ` [PATCH 04/13] drm/amd/powerplay: revise default dpm tables setup Evan Quan
     [not found]     ` <1529393945-16629-4-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 14:59       ` Alex Deucher
2018-06-19  7:38   ` [PATCH 05/13] drm/amd/powerplay: retrieve all clock ranges on startup Evan Quan
     [not found]     ` <1529393945-16629-5-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:03       ` Alex Deucher
2018-06-19  7:38   ` [PATCH 06/13] drm/amd/powerplay: revise clock level setup Evan Quan
     [not found]     ` <1529393945-16629-6-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:07       ` Alex Deucher
2018-06-19  7:38   ` [PATCH 07/13] drm/amd/powerplay: initialize uvd/vce powergate status Evan Quan
     [not found]     ` <1529393945-16629-7-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:10       ` Alex Deucher
     [not found]         ` <CADnq5_NSa8Bh_ffmz12Zu2XH4b3GawMdz_6vTF5NbFp_pLaLYA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-06-20  6:34           ` Quan, Evan
2018-06-19  7:39   ` [PATCH 08/13] drm/amd/powerplay: correct smc display config setting Evan Quan
     [not found]     ` <1529393945-16629-8-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:13       ` Alex Deucher
2018-06-19  7:39   ` [PATCH 09/13] drm/amd/powerplay: correct vega12 max num of dpm level Evan Quan
     [not found]     ` <1529393945-16629-9-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:13       ` Alex Deucher
2018-06-19  7:39   ` [PATCH 10/13] drm/amd/powerplay: apply clocks adjust rules on power state change Evan Quan
     [not found]     ` <1529393945-16629-10-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:16       ` Alex Deucher
     [not found]         ` <CADnq5_PqQ0aRcA=8iUq9qLow+W6rwpXhsj2GKRskn3xjwEbicw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-06-19 15:43           ` Zhu, Rex
     [not found]             ` <CY4PR12MB1687A705AE09BB6492A5C156FB700-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-06-20  6:22               ` Quan, Evan
2018-06-20  6:17           ` Quan, Evan
     [not found]             ` <SN6PR12MB26567D8241FEECAF286CEBA8E4770-kxOKjb6HO/FeL/N0e1LXkAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-06-20 14:05               ` Deucher, Alexander
2018-06-19  7:39   ` [PATCH 11/13] drm/amd/powerplay: set vega12 pre display configurations Evan Quan
     [not found]     ` <1529393945-16629-11-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:18       ` Alex Deucher
2018-06-19  7:39   ` [PATCH 12/13] drm/amd/powerplay: correct vega12 thermal support as true Evan Quan
     [not found]     ` <1529393945-16629-12-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:18       ` Alex Deucher
2018-06-19  7:39   ` [PATCH 13/13] drm/amd/powerplay: cosmetic fix Evan Quan
     [not found]     ` <1529393945-16629-13-git-send-email-evan.quan-5C7GfCeVMHo@public.gmane.org>
2018-06-19 15:19       ` Alex Deucher

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.