* [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
@ 2018-01-25 11:26 Rex Zhu
       [not found] ` <1516879614-11533-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Rex Zhu @ 2018-01-25 11:26 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Rex Zhu

The driver does not maintain a manual mode for dpm_force_performance_level;
users can set the sclk/mclk/pcie range directly through
pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie.

In order not to break existing tools, writing "manual" to
power_dpm_force_performance_level now does nothing and simply returns success.
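
As a rough illustration of the workflow this change assumes (the device path
and level indices below are illustrative, not taken from the patch), a user
program selects clock levels by writing directly to pp_dpm_sclk, with no prior
"manual" write to power_dpm_force_performance_level:

#include <stdio.h>

/* Rough illustration only: select sclk levels 2 and 3 by writing the
 * level indices straight to pp_dpm_sclk. With this patch there is no
 * need to write "manual" to power_dpm_force_performance_level first. */
int main(void)
{
        const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fprintf(f, "2 3\n");    /* illustrative level indices */
        fclose(f);
        return 0;
}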

Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
 drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15 +++++++--------
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
 7 files changed, 8 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1812009..66b4df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -152,7 +152,6 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
 			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
@@ -186,7 +185,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_AUTO;
 	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
-		level = AMD_DPM_FORCED_LEVEL_MANUAL;
+		pr_info("No need to set manual mode, Just go ahead\n");
 	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
 	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ab45232..8ddc978 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		if (!pi->sclk_dpm_key_disabled)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index b9aa9f4..3fab686 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -41,14 +41,13 @@ struct amd_vce_state {
 
 enum amd_dpm_forced_level {
 	AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
-	AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
-	AMD_DPM_FORCED_LEVEL_LOW = 0x4,
-	AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
-	AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
-	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
-	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
-	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
-	AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+	AMD_DPM_FORCED_LEVEL_LOW = 0x2,
+	AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
+	AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
+	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
+	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
+	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
+	AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
 };
 
 enum amd_pm_state_type {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index dec8dd9..60d280c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		ret = cz_phm_unforce_dpm_levels(hwmgr);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
 static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 409a56b..eddcbcd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						RAVEN_UMD_PSTATE_MIN_FCLK);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 13db75c..e3a8374 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-					AMD_DPM_FORCED_LEVEL_LOW |
-					AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		if (!data->sclk_dpm_key_disabled)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 6b28896..828677e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
 		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/2] drm/amd/pp: Fix sysfs pp_dpm_pcie bug on CI/VI
       [not found] ` <1516879614-11533-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-25 11:26   ` Rex Zhu
  2018-01-25 16:55   ` [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level Felix Kuehling
  1 sibling, 0 replies; 16+ messages in thread
From: Rex Zhu @ 2018-01-25 11:26 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Rex Zhu

when echo "01">pp_dpm_pcie
the pcie dpm will fix in highest link speed.
But user should expect auto speed between
level 0 and level1
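
For illustration, a standalone sketch (not driver code) of the mask check the
fix below relies on: for a non-zero mask, fls() and ffs() agree only when
exactly one bit is set, so a single-level mask forces that PCIe level while a
multi-level mask unforces it.

#include <stdio.h>
#include <strings.h>            /* ffs() */

/* Minimal stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 when no bit is set. */
static int fls_u(unsigned int x)
{
        int pos = 0;

        while (x) {
                pos++;
                x >>= 1;
        }
        return pos;
}

int main(void)
{
        unsigned int masks[] = { 0x1, 0x2, 0x3 };

        for (int i = 0; i < 3; i++) {
                unsigned int m = masks[i];

                if (fls_u(m) != ffs(m))
                        printf("mask 0x%x: several levels -> unforce PCIe DPM\n", m);
                else
                        printf("mask 0x%x: force PCIe level %d\n", m, fls_u(m) - 1);
        }
        return 0;
}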

Change-Id: I89b8fef3c179c3729b1c06b17e9cb86b52244d7b
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c              | 17 ++++++++++-------
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 17 ++++++++++-------
 2 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8ddc978..fc19d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6639,6 +6639,9 @@ static int ci_dpm_force_clock_level(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct ci_power_info *pi = ci_get_pi(adev);
 
+	if (mask == 0)
+		return -EINVAL;
+
 	switch (type) {
 	case PP_SCLK:
 		if (!pi->sclk_dpm_key_disabled)
@@ -6657,15 +6660,15 @@ static int ci_dpm_force_clock_level(void *handle,
 	case PP_PCIE:
 	{
 		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
 
-		while (tmp >>= 1)
-			level++;
-
-		if (!pi->pcie_dpm_key_disabled)
-			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+		if (!pi->pcie_dpm_key_disabled) {
+			if (fls(tmp) != ffs(tmp))
+				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+			else
+				amdgpu_ci_send_msg_to_smc_with_parameter(adev,
 					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
+					fls(tmp) - 1);
+		}
 		break;
 	}
 	default:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index e3a8374..88aaac1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4310,6 +4310,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
+	if (mask == 0)
+		return -EINVAL;
+
 	switch (type) {
 	case PP_SCLK:
 		if (!data->sclk_dpm_key_disabled)
@@ -4326,15 +4329,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 	case PP_PCIE:
 	{
 		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
 
-		while (tmp >>= 1)
-			level++;
-
-		if (!data->pcie_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr,
+		if (!data->pcie_dpm_key_disabled) {
+			if (fls(tmp) != ffs(tmp))
+				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+			else
+				smum_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
+					fls(tmp) - 1);
+		}
 		break;
 	}
 	default:
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found] ` <1516879614-11533-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
  2018-01-25 11:26   ` [PATCH 2/2] drm/amd/pp: Fix sysfs pp_dpm_pcie bug on CI/VI Rex Zhu
@ 2018-01-25 16:55   ` Felix Kuehling
       [not found]     ` <51c6111b-78ec-36f8-b5e0-4a23ccea6de4-5C7GfCeVMHo@public.gmane.org>
  1 sibling, 1 reply; 16+ messages in thread
From: Felix Kuehling @ 2018-01-25 16:55 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Zhu, Rex

This patch breaks unforcing of clocks, which is currently done by
switching back from "manual" to "auto". By removing "manual" mode, you
remove the ability to unset forced clocks.

Regards,
  Felix


On 2018-01-25 06:26 AM, Rex Zhu wrote:
> Driver do not maintain manual mode for dpm_force_performance_level,
> User can set sclk/mclk/pcie range through pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> directly.
>
> In order to not break currently tools,
> when set "manual" to power_dpm_force_performance_level
> driver will do nothing and just return successful.
>
> Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]     ` <51c6111b-78ec-36f8-b5e0-4a23ccea6de4-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-26  0:07       ` Zhu, Rex
       [not found]         ` <CY4PR12MB1687930CD8F44390C3791A63FBE10-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-26  0:07 UTC (permalink / raw)
  To: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW


I also thought about this problem.
I just think the user should unforce the clock levels through pp_dpm_sclk/mclk/pcie if they changed the clocks through those sysfs files.

The logic does seem odd, as we supply several sysfs files for adjusting the clock range.

We could fix this problem by switching the current mode to manual mode after the user writes to pp_dpm_sclk/mclk/pcie.

But another thought: if the user then changes the clock range back through the pp_dpm_* files, we are in manual mode, and when the user sets auto mode the driver in fact changes nothing.

Comparatively speaking, it is better to set manual mode after the user writes to the pp_dpm_* files.
Thanks very much.

Best Regards
Rex
________________________________
From: Kuehling, Felix
Sent: Friday, January 26, 2018 12:55:19 AM
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org; Zhu, Rex
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

This patch breaks unforcing of clocks, which is currently done by
switching back from "manual" to "auto". By removing "manual" mode, you
remove the ability to unset forced clocks.

Regards,
  Felix


On 2018-01-25 06:26 AM, Rex Zhu wrote:
> Driver do not maintain manual mode for dpm_force_performance_level,
> User can set sclk/mclk/pcie range through pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> directly.
>
> In order to not break currently tools,
> when set "manual" to power_dpm_force_performance_level
> driver will do nothing and just return successful.
>
> Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> Signed-off-by: Rex Zhu <Rex.Zhu-5C7GfCeVMHo@public.gmane.org>


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]         ` <CY4PR12MB1687930CD8F44390C3791A63FBE10-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-26  0:26           ` Felix Kuehling
       [not found]             ` <0ce63372-dc11-9710-f11d-0cf6abf326b4-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Felix Kuehling @ 2018-01-26  0:26 UTC (permalink / raw)
  To: Zhu, Rex, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-01-25 07:07 PM, Zhu, Rex wrote:
> I also think about this problem.
> just think user should unforced clk level through pp dpm
> sclk/mclk/pcie if they change the clock logic through those sysfs.
>
> The logic seems weird, As we supply many sysfs for adjust clock range.
>
> We can fix this problem by change current mode to manual mode after
> user call pp dpm sclk/mclk/pcie.
>
> But another think,if user change back the clk range through pp dpm clk.
>
> we are in manual mode, and user set auto mode, in fact, driver change
> nothing.

With profiles, switching back to auto mode would select the appropriate
profile, which may have a different clock mask. For example for compute
we enable only the highest two sclk levels.
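
A minimal standalone sketch of the kind of mask this implies (illustrative
only, not code from the driver): from a bitmask of enabled sclk levels, keep
just the two highest set bits, the way a compute profile limited to the top
two levels would.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: build a mask that keeps the two highest enabled
 * DPM levels out of an enable mask. */
static uint32_t top_two_levels(uint32_t enable_mask)
{
        uint32_t out = 0;
        int kept = 0;

        for (int bit = 31; bit >= 0 && kept < 2; bit--) {
                if (enable_mask & (1u << bit)) {
                        out |= 1u << bit;
                        kept++;
                }
        }
        return out;
}

int main(void)
{
        /* Levels 0..7 enabled -> keep only levels 6 and 7 (0xc0). */
        printf("0x%x\n", top_two_levels(0xff));
        return 0;
}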

>
> Comparatively speaking, better set manual mode after user call pp dpm clk.

That would make sense. But switching to manual mode would disable
profiles and automatic profile selection. That was one reason why I
objected to your plan to control profile clock limits using these files.

Regards,
  Felix

> Thanks very much.
>
> Best Regards
> Rex
> ------------------------------------------------------------------------
> *From:* Kuehling, Felix
> *Sent:* Friday, January 26, 2018 12:55:19 AM
> *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>  
> This patch breaks unforcing of clocks, which is currently done by
> switching back from "manual" to "auto". By removing "manual" mode, you
> remove the ability to unset forced clocks.
>
> Regards,
>   Felix
>
>
> On 2018-01-25 06:26 AM, Rex Zhu wrote:
> > Driver do not maintain manual mode for dpm_force_performance_level,
> > User can set sclk/mclk/pcie range through
> pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> > directly.
> >
> > In order to not break currently tools,
> > when set "manual" to power_dpm_force_performance_level
> > driver will do nothing and just return successful.
> >
> > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]             ` <0ce63372-dc11-9710-f11d-0cf6abf326b4-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-26 12:50               ` Zhu, Rex
       [not found]                 ` <CY4PR12MB168744ABA067C470390EA34EFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-26 12:50 UTC (permalink / raw)
  To: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW


Hi Felix,

>That would make sense. But switching to manual mode would disable
>profiles and automatic profile selection. That was one reason why I
>objected to your plan to control profile clock limits using these files.

Rex:

I am not very clear on the old logic of the gfx/compute power profile switch.

But with the new sysfs files, the logic is as follows (these sysfs files are independent):

  1. configure uphyst/downhyst/min_activity through power_profile_mode,

  2. adjust the clock range through pp_dpm_sclk/mclk/pcie (once one of these
     sysfs files is written, set the dpm level mode to unknown),

  3. adjust the power limit through pp_od_power_limit (maybe equivalent to
     disabling power containment).

In those functions, the driver does not check the dpm level mode;
the dpm level mode is only used by the power_dpm_force_performance_level functions.

Best Regards

Rex







_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                 ` <CY4PR12MB168744ABA067C470390EA34EFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-26 16:49                   ` Felix Kuehling
       [not found]                     ` <ec6ea7dd-0096-dd92-8c49-b2992b5bf506-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Felix Kuehling @ 2018-01-26 16:49 UTC (permalink / raw)
  To: Zhu, Rex, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi Rex,

I think I understand what you're trying to do. To summarize my concerns,
there are two reasons I'm against your plan:

 1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
    interfaces, which affects existing tools
 2. You're taking the clock limits out of the power profile.
    Automatically adjusting the minimum sclk/mclk is a requirement for
    the compute power profile

Regards,
  Felix

On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>
> Hi Felix,
>
>
> >That would make sense. But switching to manual mode would disable
> >profiles and automatic profile selection. That was one reason why I
> >objected to your plan to control profile clock limits using these files.
>
> Rex:
>
>
> I am not very clear the old logic of gfx/compute power profile switch.
>
>
> But with new sysfs,
>
>  
>
> The logic is(those sysfs are independent) 
>
>  1. configure uphyst/downhyst/min_ativity through power_profile_mode,
>
>       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once this
> sysffs was called, set the dpm level mode to unknown)
>
>       3. adjust power limit through pp_od_power_limit(maybe equal to
> disable power containment).
>
>       
>
> In those functions, driver do not check the dpm level mode. 
>
> the dpm level mode just used by power_dpm_force_performance_level
> functions.
>
>
> Best Regards
>
> Rex
>
>
>
>
>
> ------------------------------------------------------------------------
> *From:* Kuehling, Felix
> *Sent:* Friday, January 26, 2018 8:26 AM
> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>  
> On 2018-01-25 07:07 PM, Zhu, Rex wrote:
> > I also think about this problem.
> > just think user should unforced clk level through pp dpm
> > sclk/mclk/pcie if they change the clock logic through those sysfs.
> >
> > The logic seems weird, As we supply many sysfs for adjust clock range.
> >
> > We can fix this problem by change current mode to manual mode after
> > user call pp dpm sclk/mclk/pcie.
> >
> > But another think,if user change back the clk range through pp dpm clk.
> >
> > we are in manual mode, and user set auto mode, in fact, driver change
> > nothing.
>
> With profiles, switching back to auto mode would select the appropriate
> profile, which may have a different clock mask. For example for compute
> we enable only the highest two sclk levels.
>
> >
> > Comparatively speaking, better set manual mode after user call pp
> dpm clk.
>
> That would make sense. But switching to manual mode would disable
> profiles and automatic profile selection. That was one reason why I
> objected to your plan to control profile clock limits using these files.
>
> Regards,
>   Felix
>
> > Thanks very much.
> >
> > Best Regards
> > Rex
> > ------------------------------------------------------------------------
> > *From:* Kuehling, Felix
> > *Sent:* Friday, January 26, 2018 12:55:19 AM
> > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > power_dpm_force_performance_level
> >  
> > This patch breaks unforcing of clocks, which is currently done by
> > switching back from "manual" to "auto". By removing "manual" mode, you
> > remove the ability to unset forced clocks.
> >
> > Regards,
> >   Felix
> >
> >
> > On 2018-01-25 06:26 AM, Rex Zhu wrote:
> > > Driver do not maintain manual mode for dpm_force_performance_level,
> > > User can set sclk/mclk/pcie range through
> > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> > > directly.
> > >
> > > In order to not break currently tools,
> > > when set "manual" to power_dpm_force_performance_level
> > > driver will do nothing and just return successful.
> > >
> > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
> > > ---
> > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
> > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
> > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
> +++++++--------
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
> > >  7 files changed, 8 insertions(+), 32 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > index 1812009..66b4df0 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > @@ -152,7 +152,6 @@ static ssize_t
> > amdgpu_get_dpm_forced_performance_level(struct device *dev,
> > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
> > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
> > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
> > "manual" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
> > > @@ -186,7 +185,7 @@ static ssize_t
> > amdgpu_set_dpm_forced_performance_level(struct device *dev,
> > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
> > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
> > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
> > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
> > > +             pr_info("No need to set manual mode, Just go ahead\n");
> > >        } else if (strncmp("profile_exit", buf,
> > strlen("profile_exit")) == 0) {
> > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
> > >        } else if (strncmp("profile_standard", buf,
> > strlen("profile_standard")) == 0) {
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > index ab45232..8ddc978 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
> *handle,
> > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> > >        struct ci_power_info *pi = ci_get_pi(adev);
> > > 
> > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                if (!pi->sclk_dpm_key_disabled)
> > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > index b9aa9f4..3fab686 100644
> > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > @@ -41,14 +41,13 @@ struct amd_vce_state {
> > > 
> > >  enum amd_dpm_forced_level {
> > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
> > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
> > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
> > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
> > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
> > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
> > >  };
> > > 
> > >  enum amd_pm_state_type {
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > index dec8dd9..60d280c 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >        case AMD_DPM_FORCED_LEVEL_AUTO:
> > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
> > pp_hwmgr *hwmgr,
> > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
> > >                enum pp_clock_type type, uint32_t mask)
> > >  {
> > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                smum_send_msg_to_smc_with_parameter(hwmgr,
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > index 409a56b..eddcbcd 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >                                               
> > PPSMC_MSG_SetSoftMaxFclkByFreq,
> > >                                               
> > RAVEN_UMD_PSTATE_MIN_FCLK);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > index 13db75c..e3a8374 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
> > pp_hwmgr *hwmgr,
> > >  {
> > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
> *)(hwmgr->backend);
> > > 
> > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
> > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                if (!data->sclk_dpm_key_disabled)
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > index 6b28896..828677e 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > @@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >                vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
> > >                vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct
> > pp_hwmgr *hwmgr,
> > >  {
> > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
> > *)(hwmgr->backend);
> > > 
> > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                data->smc_state_table.gfx_boot_level = mask ?
> > (ffs(mask) - 1) : 0;
> >
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                     ` <ec6ea7dd-0096-dd92-8c49-b2992b5bf506-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-26 19:20                       ` Zhu, Rex
       [not found]                         ` <CY4PR12MB1687BFBCA906C0B17D52089BFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-26 19:20 UTC (permalink / raw)
  To: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



>1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>    interfaces, which affects existing tools


Rex: I don't think the patch will affect existing tools.


User set "manual" to power_performance_level, and then change the clock range through  pp_dpm_sclk/mclk/pcie.


With this patch, the user doesn't need to issue the "manual" command at all; if the driver still receives it, it simply returns success so that existing tools are not broken.
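
For illustration only, a minimal user-space sketch of that flow; the card0
path and the "2 3" level mask are assumptions picked for the example, not
taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *dev = "/sys/class/drm/card0/device";	/* assumed card */
	char path[256];

	/* Old tools still write "manual"; with the patch this is accepted
	 * and simply returns success without changing anything. */
	snprintf(path, sizeof(path), "%s/power_dpm_force_performance_level", dev);
	write_str(path, "manual");

	/* Force sclk levels 2 and 3 directly; no mode switch is needed. */
	snprintf(path, sizeof(path), "%s/pp_dpm_sclk", dev);
	write_str(path, "2 3");
	return 0;
}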


 >2. You're taking the clock limits out of the power profile.
 >  Automatically adjusting the minimum sclk/mclk is a requirement for
 >   the compute power profile

Rex: On vega10, under the default compute mode (with busy_set_point/fps/use_rlc_busy/min_active_level set), only two performance levels are left
(level0 and level7), and the clock just switches between the lowest and the highest.

I am not sure whether, in that case, the driver can still set the min sclk/mclk.
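
As a purely illustrative sketch of that two-level case: the min-level
mapping below mirrors the ffs()-style logic visible in the quoted
vega10_force_clock_level() hunk, while the max side and the helper names
are my own assumptions.

#include <stdio.h>

static unsigned int lowest_level(unsigned int mask)  { return __builtin_ctz(mask); }
static unsigned int highest_level(unsigned int mask) { return 31 - __builtin_clz(mask); }

int main(void)
{
	unsigned int mask = 0x81;	/* level0 + level7, as in the compute example */

	printf("soft min sclk level: %u\n", lowest_level(mask));	/* 0 */
	printf("soft max sclk level: %u\n", highest_level(mask));	/* 7 */
	return 0;
}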

Best Regards
Rex


________________________________
From: Kuehling, Felix
Sent: Saturday, January 27, 2018 12:49 AM
To: Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

Hi Rex,

I think I understand what you're trying to do. To summarize my concerns,
there are two reasons I'm against your plan:

 1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
    interfaces, which affects existing tools
 2. You're taking the clock limits out of the power profile.
    Automatically adjusting the minimum sclk/mclk is a requirement for
    the compute power profile

Regards,
  Felix

On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>
> Hi Felix,
>
>
> >That would make sense. But switching to manual mode would disable
> >profiles and automatic profile selection. That was one reason why I
> >objected to your plan to control profile clock limits using these files.
>
> Rex:
>
>
> I am not very clear the old logic of gfx/compute power profile switch.
>
>
> But with new sysfs,
>
>
>
> The logic is(those sysfs are independent)
>
>  1. configure uphyst/downhyst/min_ativity through power_profile_mode,
>
>       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once this
> sysffs was called, set the dpm level mode to unknown)
>
>       3. adjust power limit through pp_od_power_limit(maybe equal to
> disable power containment).
>
>
>
> In those functions, driver do not check the dpm level mode.
>
> the dpm level mode just used by power_dpm_force_performance_level
> functions.
>
>
> Best Regards
>
> Rex
>
>
>
>
>
> ------------------------------------------------------------------------
> *From:* Kuehling, Felix
> *Sent:* Friday, January 26, 2018 8:26 AM
> *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>
> On 2018-01-25 07:07 PM, Zhu, Rex wrote:
> > I also think about this problem.
> > just think user should unforced clk level through pp dpm
> > sclk/mclk/pcie if they change the clock logic through those sysfs.
> >
> > The logic seems weird, As we supply many sysfs for adjust clock range.
> >
> > We can fix this problem by change current mode to manual mode after
> > user call pp dpm sclk/mclk/pcie.
> >
> > But another think,if user change back the clk range through pp dpm clk.
> >
> > we are in manual mode, and user set auto mode, in fact, driver change
> > nothing.
>
> With profiles, switching back to auto mode would select the appropriate
> profile, which may have a different clock mask. For example for compute
> we enable only the highest two sclk levels.
>
> >
> > Comparatively speaking, better set manual mode after user call pp
> dpm clk.
>
> That would make sense. But switching to manual mode would disable
> profiles and automatic profile selection. That was one reason why I
> objected to your plan to control profile clock limits using these files.
>
> Regards,
>   Felix
>
> > Thanks very much.
> >
> > Best Regards
> > Rex
> > ------------------------------------------------------------------------
> > *From:* Kuehling, Felix
> > *Sent:* Friday, January 26, 2018 12:55:19 AM
> > *To:* amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org; Zhu, Rex
> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > power_dpm_force_performance_level
> >
> > This patch breaks unforcing of clocks, which is currently done by
> > switching back from "manual" to "auto". By removing "manual" mode, you
> > remove the ability to unset forced clocks.
> >
> > Regards,
> >   Felix
> >
> >
> > On 2018-01-25 06:26 AM, Rex Zhu wrote:
> > > Driver do not maintain manual mode for dpm_force_performance_level,
> > > User can set sclk/mclk/pcie range through
> > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> > > directly.
> > >
> > > In order to not break currently tools,
> > > when set "manual" to power_dpm_force_performance_level
> > > driver will do nothing and just return successful.
> > >
> > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> > > Signed-off-by: Rex Zhu <Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
> > > ---
> > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
> > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
> > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
> +++++++--------
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
> > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
> > >  7 files changed, 8 insertions(+), 32 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > index 1812009..66b4df0 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > @@ -152,7 +152,6 @@ static ssize_t
> > amdgpu_get_dpm_forced_performance_level(struct device *dev,
> > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
> > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
> > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
> > "manual" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
> > >                        (level ==
> > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
> > > @@ -186,7 +185,7 @@ static ssize_t
> > amdgpu_set_dpm_forced_performance_level(struct device *dev,
> > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
> > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
> > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
> > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
> > > +             pr_info("No need to set manual mode, Just go ahead\n");
> > >        } else if (strncmp("profile_exit", buf,
> > strlen("profile_exit")) == 0) {
> > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
> > >        } else if (strncmp("profile_standard", buf,
> > strlen("profile_standard")) == 0) {
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > index ab45232..8ddc978 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
> *handle,
> > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> > >        struct ci_power_info *pi = ci_get_pi(adev);
> > >
> > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                if (!pi->sclk_dpm_key_disabled)
> > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > index b9aa9f4..3fab686 100644
> > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > @@ -41,14 +41,13 @@ struct amd_vce_state {
> > >
> > >  enum amd_dpm_forced_level {
> > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
> > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
> > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
> > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
> > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
> > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
> > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
> > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
> > >  };
> > >
> > >  enum amd_pm_state_type {
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > index dec8dd9..60d280c 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >        case AMD_DPM_FORCED_LEVEL_AUTO:
> > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
> > pp_hwmgr *hwmgr,
> > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
> > >                enum pp_clock_type type, uint32_t mask)
> > >  {
> > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                smum_send_msg_to_smc_with_parameter(hwmgr,
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > index 409a56b..eddcbcd 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >
> > PPSMC_MSG_SetSoftMaxFclkByFreq,
> > >
> > RAVEN_UMD_PSTATE_MIN_FCLK);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > index 13db75c..e3a8374 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
> > pp_hwmgr *hwmgr,
> > >  {
> > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
> *)(hwmgr->backend);
> > >
> > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
> > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                if (!data->sclk_dpm_key_disabled)
> > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > index 6b28896..828677e 100644
> > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > @@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct
> > pp_hwmgr *hwmgr,
> > >                vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
> > >                vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > >                break;
> > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > >        default:
> > >                break;
> > > @@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct
> > pp_hwmgr *hwmgr,
> > >  {
> > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
> > *)(hwmgr->backend);
> > >
> > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > -             return -EINVAL;
> > > -
> > >        switch (type) {
> > >        case PP_SCLK:
> > >                data->smc_state_table.gfx_boot_level = mask ?
> > (ffs(mask) - 1) : 0;
> >
>



_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                         ` <CY4PR12MB1687BFBCA906C0B17D52089BFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-26 19:32                           ` Felix Kuehling
       [not found]                             ` <cc9e6d84-9720-15fb-15ec-f608f8d9392d-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Felix Kuehling @ 2018-01-26 19:32 UTC (permalink / raw)
  To: Zhu, Rex, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>
> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
> >    interfaces, which affects existing tools
>
>
> Rex: I don't think the patch will affects existing tools.
>
>
> User set "manual" to power_performance_level, and then change the
> clock range through  pp_dpm_sclk/mclk/pcie.
>
>
> with this patch, User dont need to set "manual" command,  if still
> receive the manual command, driver just return sucess to user in order
> not  break existing
>
> tools. 
>

Existing tools and users expect that switching back to auto removes the
manual clock settings. If you allow changing the clock in auto mode,
that won't happen any more.
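
A minimal, self-contained model of that expectation; every name here is an
assumption for illustration rather than the amdgpu implementation, the only
point being that writing "auto" drops the user-forced mask:

#include <stdio.h>
#include <string.h>

struct pp_state {
	unsigned int num_levels;
	unsigned int forced_mask;	/* bit i set => DPM level i allowed */
};

static void set_level(struct pp_state *s, const char *buf)
{
	if (strncmp(buf, "auto", strlen("auto")) == 0)
		s->forced_mask = (1u << s->num_levels) - 1;	/* unforce: every level */
	else if (strncmp(buf, "high", strlen("high")) == 0)
		s->forced_mask = 1u << (s->num_levels - 1);	/* highest level only */
}

int main(void)
{
	struct pp_state s = { .num_levels = 8, .forced_mask = 0x0c };	/* manual: levels 2,3 */

	set_level(&s, "auto");
	printf("mask after \"auto\": 0x%02x\n", s.forced_mask);	/* back to 0xff */
	return 0;
}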

>
>  >2. You're taking the clock limits out of the power profile.
>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>  >   the compute power profile
>
>
> Rex: In vega10, under default comput mode(with
> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
> performance levels left
> (level0 and level7). and clock just switch between lowest and highest.
>
> I am not sure in this case, driver still can set min sclk/mclk.

One more reason why the user shouldn't be allowed to set pp_dpm_sclk/mclk
in auto mode.

Regards,
  Felix

>
> Best Regards
> Rex 
>
>
> ------------------------------------------------------------------------
> *From:* Kuehling, Felix
> *Sent:* Saturday, January 27, 2018 12:49 AM
> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>  
> Hi Rex,
>
> I think I understand what you're trying to do. To summarize my concerns,
> there are two reasons I'm against your plan:
>
>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>     interfaces, which affects existing tools
>  2. You're taking the clock limits out of the power profile.
>     Automatically adjusting the minimum sclk/mclk is a requirement for
>     the compute power profile
>
> Regards,
>   Felix
>
> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
> >
> > Hi Felix,
> >
> >
> > >That would make sense. But switching to manual mode would disable
> > >profiles and automatic profile selection. That was one reason why I
> > >objected to your plan to control profile clock limits using these
> files.
> >
> > Rex:
> >
> >
> > I am not very clear the old logic of gfx/compute power profile switch.
> >
> >
> > But with new sysfs,
> >
> >  
> >
> > The logic is(those sysfs are independent) 
> >
> >  1. configure uphyst/downhyst/min_ativity through power_profile_mode,
> >
> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once this
> > sysffs was called, set the dpm level mode to unknown)
> >
> >       3. adjust power limit through pp_od_power_limit(maybe equal to
> > disable power containment).
> >
> >       
> >
> > In those functions, driver do not check the dpm level mode. 
> >
> > the dpm level mode just used by power_dpm_force_performance_level
> > functions.
> >
> >
> > Best Regards
> >
> > Rex
> >
> >
> >
> >
> >
> > ------------------------------------------------------------------------
> > *From:* Kuehling, Felix
> > *Sent:* Friday, January 26, 2018 8:26 AM
> > *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > power_dpm_force_performance_level
> >  
> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
> > > I also think about this problem.
> > > just think user should unforced clk level through pp dpm
> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
> > >
> > > The logic seems weird, As we supply many sysfs for adjust clock range.
> > >
> > > We can fix this problem by change current mode to manual mode after
> > > user call pp dpm sclk/mclk/pcie.
> > >
> > > But another think,if user change back the clk range through pp dpm
> clk.
> > >
> > > we are in manual mode, and user set auto mode, in fact, driver change
> > > nothing.
> >
> > With profiles, switching back to auto mode would select the appropriate
> > profile, which may have a different clock mask. For example for compute
> > we enable only the highest two sclk levels.
> >
> > >
> > > Comparatively speaking, better set manual mode after user call pp
> > dpm clk.
> >
> > That would make sense. But switching to manual mode would disable
> > profiles and automatic profile selection. That was one reason why I
> > objected to your plan to control profile clock limits using these files.
> >
> > Regards,
> >   Felix
> >
> > > Thanks very much.
> > >
> > > Best Regards
> > > Rex
> > >
> ------------------------------------------------------------------------
> > > *From:* Kuehling, Felix
> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
> > > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > > power_dpm_force_performance_level
> > >  
> > > This patch breaks unforcing of clocks, which is currently done by
> > > switching back from "manual" to "auto". By removing "manual" mode, you
> > > remove the ability to unset forced clocks.
> > >
> > > Regards,
> > >   Felix
> > >
> > >
> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
> > > > Driver do not maintain manual mode for dpm_force_performance_level,
> > > > User can set sclk/mclk/pcie range through
> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> > > > directly.
> > > >
> > > > In order to not break currently tools,
> > > > when set "manual" to power_dpm_force_performance_level
> > > > driver will do nothing and just return successful.
> > > >
> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> > > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
> > > > ---
> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
> > +++++++--------
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > index 1812009..66b4df0 100644
> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > @@ -152,7 +152,6 @@ static ssize_t
> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
> "auto" :
> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
> "high" :
> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
> > > "manual" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
> > > > @@ -186,7 +185,7 @@ static ssize_t
> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
> > > > +             pr_info("No need to set manual mode, Just go
> ahead\n");
> > > >        } else if (strncmp("profile_exit", buf,
> > > strlen("profile_exit")) == 0) {
> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
> > > >        } else if (strncmp("profile_standard", buf,
> > > strlen("profile_standard")) == 0) {
> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > index ab45232..8ddc978 100644
> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
> > *handle,
> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> > > >        struct ci_power_info *pi = ci_get_pi(adev);
> > > > 
> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                if (!pi->sclk_dpm_key_disabled)
> > > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > index b9aa9f4..3fab686 100644
> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
> > > > 
> > > >  enum amd_dpm_forced_level {
> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
> > > >  };
> > > > 
> > > >  enum amd_pm_state_type {
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > index dec8dd9..60d280c 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
> > > >                enum pp_clock_type type, uint32_t mask)
> > > >  {
> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > index 409a56b..eddcbcd 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >                                               
> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
> > > >                                               
> > > RAVEN_UMD_PSTATE_MIN_FCLK);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > index 13db75c..e3a8374 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  {
> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
> > *)(hwmgr->backend);
> > > > 
> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                if (!data->sclk_dpm_key_disabled)
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > index 6b28896..828677e 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > @@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
> 1<<sclk_mask);
> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
> 1<<mclk_mask);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  {
> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
> > > *)(hwmgr->backend);
> > > > 
> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                data->smc_state_table.gfx_boot_level = mask ?
> > > (ffs(mask) - 1) : 0;
> > >
> >
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                             ` <cc9e6d84-9720-15fb-15ec-f608f8d9392d-5C7GfCeVMHo@public.gmane.org>
@ 2018-01-26 20:08                               ` Zhu, Rex
       [not found]                                 ` <CY4PR12MB1687274014BDD739BE44DF6CFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-26 20:08 UTC (permalink / raw)
  To: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW



>Existing tools and users expect that switching back to auto removes the
>manual clock settings. If you allow changing the clock in auto mode,
>that won't happen any more.

I have sent patch v2 to fix this problem: the user can switch back to auto mode, and all manual clock settings will be removed.


>One more reason why allowing the user to set pp_dpm_sckl/mclk shouldn't be allowed in auto-mode.

That restriction is old logic, probably carried over from the radeon driver.
The driver can allow the pp_dpm_sclk/mclk range to be set in auto/high/low mode as well.
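
Concretely, the change boils down to dropping the mode check that the
quoted smu7/vega10 hunks remove. A compilable before/after model (the
constants and function names here are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>

#define LEVEL_AUTO 0x1
#define LEVEL_LOW  0x2
#define LEVEL_HIGH 0x4

/* Old behaviour: forcing a clock level was rejected outside manual mode. */
static int force_clock_level_old(unsigned int level, unsigned int mask)
{
	if (level & (LEVEL_AUTO | LEVEL_LOW | LEVEL_HIGH))
		return -EINVAL;
	(void)mask;
	return 0;
}

/* New behaviour: the current mode is not checked at all. */
static int force_clock_level_new(unsigned int level, unsigned int mask)
{
	(void)level;
	(void)mask;
	return 0;
}

int main(void)
{
	printf("old, in auto mode: %d\n", force_clock_level_old(LEVEL_AUTO, 0x3));	/* -EINVAL */
	printf("new, in auto mode: %d\n", force_clock_level_new(LEVEL_AUTO, 0x3));	/* 0 */
	return 0;
}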

Best Regards
Rex
________________________________
From: Kuehling, Felix
Sent: Saturday, January 27, 2018 3:32 AM
To: Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>
> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
> >    interfaces, which affects existing tools
>
>
> Rex: I don't think the patch will affects existing tools.
>
>
> User set "manual" to power_performance_level, and then change the
> clock range through  pp_dpm_sclk/mclk/pcie.
>
>
> with this patch, User dont need to set "manual" command,  if still
> receive the manual command, driver just return sucess to user in order
> not  break existing
>
> tools.
>

Existing tools and users expect that switching back to auto removes the
manual clock settings. If you allow changing the clock in auto mode,
that won't happen any more.

>
>  >2. You're taking the clock limits out of the power profile.
>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>  >   the compute power profile
>
>
> Rex: In vega10, under default comput mode(with
> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
> performance levels left
> (level0 and level7). and clock just switch between lowest and highest.
>
> I am not sure in this case, driver still can set min sclk/mclk.

One more reason why allowing the user to set pp_dpm_sckl/mclk shouldn't
be allowed in auto-mode.

Regards,
  Felix

>
> Best Regards
> Rex
>
>
> ------------------------------------------------------------------------
> *From:* Kuehling, Felix
> *Sent:* Saturday, January 27, 2018 12:49 AM
> *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>
> Hi Rex,
>
> I think I understand what you're trying to do. To summarize my concerns,
> there are two reasons I'm against your plan:
>
>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>     interfaces, which affects existing tools
>  2. You're taking the clock limits out of the power profile.
>     Automatically adjusting the minimum sclk/mclk is a requirement for
>     the compute power profile
>
> Regards,
>   Felix
>
> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
> >
> > Hi Felix,
> >
> >
> > >That would make sense. But switching to manual mode would disable
> > >profiles and automatic profile selection. That was one reason why I
> > >objected to your plan to control profile clock limits using these
> files.
> >
> > Rex:
> >
> >
> > I am not very clear the old logic of gfx/compute power profile switch.
> >
> >
> > But with new sysfs,
> >
> >
> >
> > The logic is(those sysfs are independent)
> >
> >  1. configure uphyst/downhyst/min_ativity through power_profile_mode,
> >
> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once this
> > sysffs was called, set the dpm level mode to unknown)
> >
> >       3. adjust power limit through pp_od_power_limit(maybe equal to
> > disable power containment).
> >
> >
> >
> > In those functions, driver do not check the dpm level mode.
> >
> > the dpm level mode just used by power_dpm_force_performance_level
> > functions.
> >
> >
> > Best Regards
> >
> > Rex
> >
> >
> >
> >
> >
> > ------------------------------------------------------------------------
> > *From:* Kuehling, Felix
> > *Sent:* Friday, January 26, 2018 8:26 AM
> > *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > power_dpm_force_performance_level
> >
> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
> > > I also think about this problem.
> > > just think user should unforced clk level through pp dpm
> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
> > >
> > > The logic seems weird, As we supply many sysfs for adjust clock range.
> > >
> > > We can fix this problem by change current mode to manual mode after
> > > user call pp dpm sclk/mclk/pcie.
> > >
> > > But another think,if user change back the clk range through pp dpm
> clk.
> > >
> > > we are in manual mode, and user set auto mode, in fact, driver change
> > > nothing.
> >
> > With profiles, switching back to auto mode would select the appropriate
> > profile, which may have a different clock mask. For example for compute
> > we enable only the highest two sclk levels.
> >
> > >
> > > Comparatively speaking, better set manual mode after user call pp
> > dpm clk.
> >
> > That would make sense. But switching to manual mode would disable
> > profiles and automatic profile selection. That was one reason why I
> > objected to your plan to control profile clock limits using these files.
> >
> > Regards,
> >   Felix
> >
> > > Thanks very much.
> > >
> > > Best Regards
> > > Rex
> > >
> ------------------------------------------------------------------------
> > > *From:* Kuehling, Felix
> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
> > > *To:* amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org; Zhu, Rex
> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> > > power_dpm_force_performance_level
> > >
> > > This patch breaks unforcing of clocks, which is currently done by
> > > switching back from "manual" to "auto". By removing "manual" mode, you
> > > remove the ability to unset forced clocks.
> > >
> > > Regards,
> > >   Felix
> > >
> > >
> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
> > > > Driver do not maintain manual mode for dpm_force_performance_level,
> > > > User can set sclk/mclk/pcie range through
> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
> > > > directly.
> > > >
> > > > In order to not break currently tools,
> > > > when set "manual" to power_dpm_force_performance_level
> > > > driver will do nothing and just return successful.
> > > >
> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
> > > > Signed-off-by: Rex Zhu <Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
> > > > ---
> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
> > +++++++--------
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > index 1812009..66b4df0 100644
> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > > > @@ -152,7 +152,6 @@ static ssize_t
> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
> "auto" :
> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
> "high" :
> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
> > > "manual" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
> > > >                        (level ==
> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
> > > > @@ -186,7 +185,7 @@ static ssize_t
> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
> > > > +             pr_info("No need to set manual mode, Just go
> ahead\n");
> > > >        } else if (strncmp("profile_exit", buf,
> > > strlen("profile_exit")) == 0) {
> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
> > > >        } else if (strncmp("profile_standard", buf,
> > > strlen("profile_standard")) == 0) {
> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > index ab45232..8ddc978 100644
> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
> > *handle,
> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> > > >        struct ci_power_info *pi = ci_get_pi(adev);
> > > >
> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                if (!pi->sclk_dpm_key_disabled)
> > > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > index b9aa9f4..3fab686 100644
> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
> > > >
> > > >  enum amd_dpm_forced_level {
> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
> > > >  };
> > > >
> > > >  enum amd_pm_state_type {
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > index dec8dd9..60d280c 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
> > > >                enum pp_clock_type type, uint32_t mask)
> > > >  {
> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > index 409a56b..eddcbcd 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >
> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
> > > >
> > > RAVEN_UMD_PSTATE_MIN_FCLK);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > index 13db75c..e3a8374 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  {
> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
> > *)(hwmgr->backend);
> > > >
> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                if (!data->sclk_dpm_key_disabled)
> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > index 6b28896..828677e 100644
> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > > > @@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct
> > > pp_hwmgr *hwmgr,
> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
> 1<<sclk_mask);
> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
> 1<<mclk_mask);
> > > >                break;
> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> > > >        default:
> > > >                break;
> > > > @@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct
> > > pp_hwmgr *hwmgr,
> > > >  {
> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
> > > *)(hwmgr->backend);
> > > >
> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
> > > > -             return -EINVAL;
> > > > -
> > > >        switch (type) {
> > > >        case PP_SCLK:
> > > >                data->smc_state_table.gfx_boot_level = mask ?
> > > (ffs(mask) - 1) : 0;
> > >
> >
>



_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                 ` <CY4PR12MB1687274014BDD739BE44DF6CFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-26 23:51                                   ` Alex Deucher
       [not found]                                     ` <CADnq5_Ni6j8ONe7f5rDMprbeB6Mq1RVXJAonUO2VTp+1Dgf+Gw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Alex Deucher @ 2018-01-26 23:51 UTC (permalink / raw)
  To: Zhu, Rex; +Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

I think we have two use cases for the profiles:

1. automatic profile switching for different driver use cases
2. manually tweaking profiles/clocks/power for testing

How about we make the profile selection dependent on selecting the
manual force_performance_level and not add an auto to the profile
selector.  Then when you select manual you can tweak the clocks and
profile heuristics and power containment via their respective knobs.
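(A rough sketch of what that gating could look like in the common powerplay
layer -- the wrapper and callback names here are illustrative, not an actual
interface:)

	/* Illustrative only: reject profile tweaks unless the user has
	 * explicitly selected manual mode first.
	 */
	static int pp_dpm_set_power_profile(struct pp_hwmgr *hwmgr,
					    long *input, uint32_t size)
	{
		if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			return -EINVAL;

		return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	}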


Alex

On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>>Existing tools and users expect that switching back to auto removes the
>>manual clock settings. If you allow changing the clock in auto mode,
>>that won't happen any more.
>
>
> I have sent the patch v2 to fix this problem. user can swith back auto mode
> and all manual clock setting will be removed.
>
>
>>One more reason why allowing the user to set pp_dpm_sckl/mclk shouldn't be
>> allowed in auto-mode.
>
> this is an old logic, maybe ref radeon driver.
> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>
> Best Regards
> Rex
> ________________________________
> From: Kuehling, Felix
> Sent: Saturday, January 27, 2018 3:32 AM
> To: Zhu, Rex; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>
> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>
>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>> >    interfaces, which affects existing tools
>>
>>
>> Rex: I don't think the patch will affects existing tools.
>>
>>
>> User set "manual" to power_performance_level, and then change the
>> clock range through  pp_dpm_sclk/mclk/pcie.
>>
>>
>> with this patch, User dont need to set "manual" command,  if still
>> receive the manual command, driver just return sucess to user in order
>> not  break existing
>>
>> tools.
>>
>
> Existing tools and users expect that switching back to auto removes the
> manual clock settings. If you allow changing the clock in auto mode,
> that won't happen any more.
>
>>
>>  >2. You're taking the clock limits out of the power profile.
>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>  >   the compute power profile
>>
>>
>> Rex: In vega10, under default comput mode(with
>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
>> performance levels left
>> (level0 and level7). and clock just switch between lowest and highest.
>>
>> I am not sure in this case, driver still can set min sclk/mclk.
>
> One more reason why allowing the user to set pp_dpm_sckl/mclk shouldn't
> be allowed in auto-mode.
>
> Regards,
>   Felix
>
>>
>> Best Regards
>> Rex
>>
>>
>> ------------------------------------------------------------------------
>> *From:* Kuehling, Felix
>> *Sent:* Saturday, January 27, 2018 12:49 AM
>> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> power_dpm_force_performance_level
>>
>> Hi Rex,
>>
>> I think I understand what you're trying to do. To summarize my concerns,
>> there are two reasons I'm against your plan:
>>
>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>     interfaces, which affects existing tools
>>  2. You're taking the clock limits out of the power profile.
>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>     the compute power profile
>>
>> Regards,
>>   Felix
>>
>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>> >
>> > Hi Felix,
>> >
>> >
>> > >That would make sense. But switching to manual mode would disable
>> > >profiles and automatic profile selection. That was one reason why I
>> > >objected to your plan to control profile clock limits using these
>> files.
>> >
>> > Rex:
>> >
>> >
>> > I am not very clear the old logic of gfx/compute power profile switch.
>> >
>> >
>> > But with new sysfs,
>> >
>> >
>> >
>> > The logic is(those sysfs are independent)
>> >
>> >  1. configure uphyst/downhyst/min_ativity through power_profile_mode,
>> >
>> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once this
>> > sysffs was called, set the dpm level mode to unknown)
>> >
>> >       3. adjust power limit through pp_od_power_limit(maybe equal to
>> > disable power containment).
>> >
>> >
>> >
>> > In those functions, driver do not check the dpm level mode.
>> >
>> > the dpm level mode just used by power_dpm_force_performance_level
>> > functions.
>> >
>> >
>> > Best Regards
>> >
>> > Rex
>> >
>> >
>> >
>> >
>> >
>> > ------------------------------------------------------------------------
>> > *From:* Kuehling, Felix
>> > *Sent:* Friday, January 26, 2018 8:26 AM
>> > *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> > power_dpm_force_performance_level
>> >
>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>> > > I also think about this problem.
>> > > just think user should unforced clk level through pp dpm
>> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
>> > >
>> > > The logic seems weird, As we supply many sysfs for adjust clock range.
>> > >
>> > > We can fix this problem by change current mode to manual mode after
>> > > user call pp dpm sclk/mclk/pcie.
>> > >
>> > > But another think,if user change back the clk range through pp dpm
>> clk.
>> > >
>> > > we are in manual mode, and user set auto mode, in fact, driver change
>> > > nothing.
>> >
>> > With profiles, switching back to auto mode would select the appropriate
>> > profile, which may have a different clock mask. For example for compute
>> > we enable only the highest two sclk levels.
>> >
>> > >
>> > > Comparatively speaking, better set manual mode after user call pp
>> > dpm clk.
>> >
>> > That would make sense. But switching to manual mode would disable
>> > profiles and automatic profile selection. That was one reason why I
>> > objected to your plan to control profile clock limits using these files.
>> >
>> > Regards,
>> >   Felix
>> >
>> > > Thanks very much.
>> > >
>> > > Best Regards
>> > > Rex
>> > >
>> ------------------------------------------------------------------------
>> > > *From:* Kuehling, Felix
>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>> > > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> > > power_dpm_force_performance_level
>> > >
>> > > This patch breaks unforcing of clocks, which is currently done by
>> > > switching back from "manual" to "auto". By removing "manual" mode, you
>> > > remove the ability to unset forced clocks.
>> > >
>> > > Regards,
>> > >   Felix
>> > >
>> > >
>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>> > > > Driver do not maintain manual mode for dpm_force_performance_level,
>> > > > User can set sclk/mclk/pcie range through
>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>> > > > directly.
>> > > >
>> > > > In order to not break currently tools,
>> > > > when set "manual" to power_dpm_force_performance_level
>> > > > driver will do nothing and just return successful.
>> > > >
>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>> > > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
>> > > > ---
>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>> > +++++++--------
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>> > > >
>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > index 1812009..66b4df0 100644
>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > @@ -152,7 +152,6 @@ static ssize_t
>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>> "auto" :
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>> "high" :
>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>> > > "manual" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>> > > > @@ -186,7 +185,7 @@ static ssize_t
>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>> > > > +             pr_info("No need to set manual mode, Just go
>> ahead\n");
>> > > >        } else if (strncmp("profile_exit", buf,
>> > > strlen("profile_exit")) == 0) {
>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>> > > >        } else if (strncmp("profile_standard", buf,
>> > > strlen("profile_standard")) == 0) {
>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > index ab45232..8ddc978 100644
>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>> > *handle,
>> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>> > > >
>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                if (!pi->sclk_dpm_key_disabled)
>> > > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > index b9aa9f4..3fab686 100644
>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>> > > >
>> > > >  enum amd_dpm_forced_level {
>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>> > > >  };
>> > > >
>> > > >  enum amd_pm_state_type {
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > index dec8dd9..60d280c 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>> > > >                enum pp_clock_type type, uint32_t mask)
>> > > >  {
>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > index 409a56b..eddcbcd 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >
>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>> > > >
>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > index 13db75c..e3a8374 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  {
>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>> > *)(hwmgr->backend);
>> > > >
>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                if (!data->sclk_dpm_key_disabled)
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > index 6b28896..828677e 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > @@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>> 1<<sclk_mask);
>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>> 1<<mclk_mask);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  {
>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>> > > *)(hwmgr->backend);
>> > > >
>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>> > > (ffs(mask) - 1) : 0;
>> > >
>> >
>>
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                     ` <CADnq5_Ni6j8ONe7f5rDMprbeB6Mq1RVXJAonUO2VTp+1Dgf+Gw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-01-29 12:03                                       ` Zhu, Rex
       [not found]                                         ` <CY4PR12MB1687A63BA8F717170700292DFBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-29 12:03 UTC (permalink / raw)
  To: 'Alex Deucher'
  Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

[-- Attachment #1: Type: text/plain, Size: 18274 bytes --]

Hi Alex,

>How about we make the profile selection dependent on selecting the manual force_performance_level

If so, we need to check pm.dpm.forced_level before "profile heuristics" and "disable power containment", the same as for pp_dpm_sclk/mclk/pcie:

if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
                               AMD_DPM_FORCED_LEVEL_LOW |
                               AMD_DPM_FORCED_LEVEL_HIGH))
               return -EINVAL; 

How about deleting the level check before force_clock_level and just changing the forced level to manual after force_clock_level? Please see the attached patch.

This change has no impact on the existing interface and tools.
It just refines the logic of power_dpm_force_performance_level and pp_dpm_sclk/mclk/pcie.
It also has no impact on the power/compute profile mode on smu7.

The difference is:

New users have one less command to input if they want to use pp_dpm_sclk/mclk/pcie.
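
For reference, the central hunk of the attached patch, condensed: the common
pp_dpm_force_clock_level() wrapper simply flips the level to manual after
forcing the clocks, so the separate "manual" write is no longer required:

	mutex_lock(&pp_handle->pp_lock);
	hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	/* user is now driving the clock masks by hand */
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_MANUAL;
	mutex_unlock(&pp_handle->pp_lock);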

 
Best Regards
Rex
-----Original Message-----
From: Alex Deucher [mailto:alexdeucher@gmail.com] 
Sent: Saturday, January 27, 2018 7:51 AM
To: Zhu, Rex
Cc: Kuehling, Felix; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

I think we have two use cases for the profiles:

1. automatic profile switching for different driver use cases
2. manually tweaking profiles/clocks/power for testing

How about we make the profile selection dependent on selecting the manual force_performance_level and not add an auto to the profile selector.  Then when you select manual you can tweak the clocks and profile heuristics and power containment via their respective knobs.


Alex

On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>>Existing tools and users expect that switching back to auto removes 
>>the manual clock settings. If you allow changing the clock in auto 
>>mode, that won't happen any more.
>
>
> I have sent the patch v2 to fix this problem. user can swith back auto 
> mode and all manual clock setting will be removed.
>
>
>>One more reason why allowing the user to set pp_dpm_sckl/mclk 
>>shouldn't be  allowed in auto-mode.
>
> this is an old logic, maybe ref radeon driver.
> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>
> Best Regards
> Rex
> ________________________________
> From: Kuehling, Felix
> Sent: Saturday, January 27, 2018 3:32 AM
> To: Zhu, Rex; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for 
> power_dpm_force_performance_level
>
> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>
>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>> >    interfaces, which affects existing tools
>>
>>
>> Rex: I don't think the patch will affects existing tools.
>>
>>
>> User set "manual" to power_performance_level, and then change the 
>> clock range through  pp_dpm_sclk/mclk/pcie.
>>
>>
>> with this patch, User dont need to set "manual" command,  if still 
>> receive the manual command, driver just return sucess to user in 
>> order not  break existing
>>
>> tools.
>>
>
> Existing tools and users expect that switching back to auto removes 
> the manual clock settings. If you allow changing the clock in auto 
> mode, that won't happen any more.
>
>>
>>  >2. You're taking the clock limits out of the power profile.
>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>  >   the compute power profile
>>
>>
>> Rex: In vega10, under default comput mode(with 
>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two 
>> performance levels left
>> (level0 and level7). and clock just switch between lowest and highest.
>>
>> I am not sure in this case, driver still can set min sclk/mclk.
>
> One more reason why allowing the user to set pp_dpm_sckl/mclk 
> shouldn't be allowed in auto-mode.
>
> Regards,
>   Felix
>
>>
>> Best Regards
>> Rex
>>
>>
>> ---------------------------------------------------------------------
>> ---
>> *From:* Kuehling, Felix
>> *Sent:* Saturday, January 27, 2018 12:49 AM
>> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for 
>> power_dpm_force_performance_level
>>
>> Hi Rex,
>>
>> I think I understand what you're trying to do. To summarize my 
>> concerns, there are two reasons I'm against your plan:
>>
>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>     interfaces, which affects existing tools
>>  2. You're taking the clock limits out of the power profile.
>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>     the compute power profile
>>
>> Regards,
>>   Felix
>>
>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>> >
>> > Hi Felix,
>> >
>> >
>> > >That would make sense. But switching to manual mode would disable 
>> > >profiles and automatic profile selection. That was one reason why 
>> > >I objected to your plan to control profile clock limits using 
>> > >these
>> files.
>> >
>> > Rex:
>> >
>> >
>> > I am not very clear the old logic of gfx/compute power profile switch.
>> >
>> >
>> > But with new sysfs,
>> >
>> >
>> >
>> > The logic is(those sysfs are independent)
>> >
>> >  1. configure uphyst/downhyst/min_ativity through 
>> > power_profile_mode,
>> >
>> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once 
>> > this sysffs was called, set the dpm level mode to unknown)
>> >
>> >       3. adjust power limit through pp_od_power_limit(maybe equal 
>> > to disable power containment).
>> >
>> >
>> >
>> > In those functions, driver do not check the dpm level mode.
>> >
>> > the dpm level mode just used by power_dpm_force_performance_level 
>> > functions.
>> >
>> >
>> > Best Regards
>> >
>> > Rex
>> >
>> >
>> >
>> >
>> >
>> > -------------------------------------------------------------------
>> > -----
>> > *From:* Kuehling, Felix
>> > *Sent:* Friday, January 26, 2018 8:26 AM
>> > *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for 
>> > power_dpm_force_performance_level
>> >
>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>> > > I also think about this problem.
>> > > just think user should unforced clk level through pp dpm 
>> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
>> > >
>> > > The logic seems weird, As we supply many sysfs for adjust clock range.
>> > >
>> > > We can fix this problem by change current mode to manual mode 
>> > > after user call pp dpm sclk/mclk/pcie.
>> > >
>> > > But another think,if user change back the clk range through pp 
>> > > dpm
>> clk.
>> > >
>> > > we are in manual mode, and user set auto mode, in fact, driver 
>> > > change nothing.
>> >
>> > With profiles, switching back to auto mode would select the 
>> > appropriate profile, which may have a different clock mask. For 
>> > example for compute we enable only the highest two sclk levels.
>> >
>> > >
>> > > Comparatively speaking, better set manual mode after user call pp
>> > dpm clk.
>> >
>> > That would make sense. But switching to manual mode would disable 
>> > profiles and automatic profile selection. That was one reason why I 
>> > objected to your plan to control profile clock limits using these files.
>> >
>> > Regards,
>> >   Felix
>> >
>> > > Thanks very much.
>> > >
>> > > Best Regards
>> > > Rex
>> > >
>> ---------------------------------------------------------------------
>> ---
>> > > *From:* Kuehling, Felix
>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>> > > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for 
>> > > power_dpm_force_performance_level
>> > >
>> > > This patch breaks unforcing of clocks, which is currently done by 
>> > > switching back from "manual" to "auto". By removing "manual" 
>> > > mode, you remove the ability to unset forced clocks.
>> > >
>> > > Regards,
>> > >   Felix
>> > >
>> > >
>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>> > > > Driver do not maintain manual mode for 
>> > > > dpm_force_performance_level, User can set sclk/mclk/pcie range 
>> > > > through
>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>> > > > directly.
>> > > >
>> > > > In order to not break currently tools, when set "manual" to 
>> > > > power_dpm_force_performance_level driver will do nothing and 
>> > > > just return successful.
>> > > >
>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>> > > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
>> > > > ---
>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>> > +++++++--------
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>> > > >
>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > index 1812009..66b4df0 100644
>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> > > > @@ -152,7 +152,6 @@ static ssize_t
>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>> "auto" :
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>> "high" :
>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>> > > "manual" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>> > > >                        (level ==
>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>> > > > @@ -186,7 +185,7 @@ static ssize_t
>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>> > > > +             pr_info("No need to set manual mode, Just go
>> ahead\n");
>> > > >        } else if (strncmp("profile_exit", buf,
>> > > strlen("profile_exit")) == 0) {
>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>> > > >        } else if (strncmp("profile_standard", buf,
>> > > strlen("profile_standard")) == 0) {
>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > index ab45232..8ddc978 100644
>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>> > *handle,
>> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>> > > >
>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                if (!pi->sclk_dpm_key_disabled)
>> > > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > index b9aa9f4..3fab686 100644
>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>> > > >
>> > > >  enum amd_dpm_forced_level {
>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>> > > >  };
>> > > >
>> > > >  enum amd_pm_state_type {
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > index dec8dd9..60d280c 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>> > > >                enum pp_clock_type type, uint32_t mask)  {
>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > index 409a56b..eddcbcd 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >
>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>> > > >
>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > index 13db75c..e3a8374 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  {
>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>> > *)(hwmgr->backend);
>> > > >
>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                if (!data->sclk_dpm_key_disabled)
>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > index 6b28896..828677e 100644
>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>> > > > @@ -4241,7 +4241,6 @@ static int 
>> > > > vega10_dpm_force_dpm_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>> 1<<sclk_mask);
>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>> 1<<mclk_mask);
>> > > >                break;
>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>> > > >        default:
>> > > >                break;
>> > > > @@ -4500,11 +4499,6 @@ static int 
>> > > > vega10_force_clock_level(struct
>> > > pp_hwmgr *hwmgr,
>> > > >  {
>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>> > > *)(hwmgr->backend);
>> > > >
>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>> > > > -             return -EINVAL;
>> > > > -
>> > > >        switch (type) {
>> > > >        case PP_SCLK:
>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>> > > (ffs(mask) - 1) : 0;
>> > >
>> >
>>
>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>

[-- Attachment #2: 0001-drm-amd-pp-Refine-manual-mode-for-power_dpm_force_pe.patch --]
[-- Type: application/octet-stream, Size: 6305 bytes --]

From d58a303fea24c777fbdab41a0af0bf981ffe5ecf Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 29 Jan 2018 15:09:01 +0800
Subject: [PATCH] drm/amd/pp: Refine manual mode for
 power_dpm_force_performance_level

Delete the current performance mode check before changing the clock
range through pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie; instead, notify the
user that the performance mode has been changed by setting the mode to
manual.

Change-Id: I9cdedee7f8f6dd320458ff905680dde1932aded7
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             | 2 +-
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c                | 7 +------
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c      | 1 +
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     | 4 ----
 drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     | 1 -
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 6 ------
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6 ------
 7 files changed, 3 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 39ef93a..b0cdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -186,7 +186,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_AUTO;
 	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
-		level = AMD_DPM_FORCED_LEVEL_MANUAL;
+		return count;
 	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
 	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5ceb5a2..cf4b55e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		if (!pi->sclk_dpm_key_disabled)
@@ -6676,7 +6671,7 @@ static int ci_dpm_force_clock_level(void *handle,
 	default:
 		break;
 	}
-
+	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_MANUAL;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 173382c..86c5e28 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -840,6 +840,7 @@ static int pp_dpm_force_clock_level(void *handle,
 	}
 	mutex_lock(&pp_handle->pp_lock);
 	hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
+	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_MANUAL;
 	mutex_unlock(&pp_handle->pp_lock);
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 1394b2b..fe6e161 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		ret = cz_phm_unforce_dpm_levels(hwmgr);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
 static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 409a56b..eddcbcd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						RAVEN_UMD_PSTATE_MIN_FCLK);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 715880b..ec12fba 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-					AMD_DPM_FORCED_LEVEL_LOW |
-					AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		if (!data->sclk_dpm_key_disabled)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 4c259cd..47b8583 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4241,7 +4241,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
 		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
 		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
@@ -4500,11 +4499,6 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
-		return -EINVAL;
-
 	switch (type) {
 	case PP_SCLK:
 		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-- 
1.9.1


[-- Attachment #3: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                         ` <CY4PR12MB1687A63BA8F717170700292DFBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-29 18:02                                           ` Alex Deucher
       [not found]                                             ` <CADnq5_Njpv+OnXRD0bo4ZefjxR8LLnfsyTCoTmdmYzgYAuBXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Alex Deucher @ 2018-01-29 18:02 UTC (permalink / raw)
  To: Zhu, Rex; +Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Mon, Jan 29, 2018 at 7:03 AM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
> Hi Alex,
>
>>How about we make the profile selection dependent on selecting the manual force_performance_level
>
> If so, we need to check pm.dpm.forced_level  before "profile heuristics" and "disable power containment" as same as pp_dpm_sclk/mclk/pcie.
>
> if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>                                AMD_DPM_FORCED_LEVEL_LOW |
>                                AMD_DPM_FORCED_LEVEL_HIGH))
>                return -EINVAL;
>
> How about delete judging code before force_clock_level and just change force_level to manual after force_clock_level. Please see the attached patch.
>
> This change have no impact on existing interface and tools.
> It just refine the logic of power_dpm_force_performance_level and pp_dpm_sclk/mclk/pcie.
> Also has no impact to power/computer profile mode on smu7.
>
> The difference is:
>
> New user can have one less command need to input if they want to use pp_dpm_sclk/mclk/pcie.

I'd prefer to keep the requirement to select manual mode to be able to
manually mess with the clock levels.  This also simplifies the
profile interface by only allowing you to change the profile if you
select manual mode first.  That way we don't have to add an AUTO
profile to the profile selection to let the driver pick the profile;
that can happen automatically if the user sets the
force_performance_level to auto.
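
(A minimal sketch of that requirement, assuming the check sits once in the
common pp_dpm_force_clock_level() path rather than in each hwmgr backend;
the exact placement is just a suggestion:)

	/* manual clock masks are only honored once "manual" is selected */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_info("pp_dpm_sclk/mclk/pcie require manual mode\n");
		return -EINVAL;
	}

	hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);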

Alex

>
>
> Best Regards
> Rex
> -----Original Message-----
> From: Alex Deucher [mailto:alexdeucher@gmail.com]
> Sent: Saturday, January 27, 2018 7:51 AM
> To: Zhu, Rex
> Cc: Kuehling, Felix; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
>
> I think we have two use cases for the profiles:
>
> 1. automatic profile switching for different driver use cases
> 2. manually tweaking profiles/clocks/power for testing
>
> How about we make the profile selection dependent on selecting the manual force_performance_level and not add an auto to the profile selector.  Then when you select manual you can tweak the clocks and profile heuristics and power containment via their respective knobs.
>
>
> Alex
>
> On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>>>Existing tools and users expect that switching back to auto removes
>>>the manual clock settings. If you allow changing the clock in auto
>>>mode, that won't happen any more.
>>
>>
>> I have sent the patch v2 to fix this problem. user can swith back auto
>> mode and all manual clock setting will be removed.
>>
>>
>>>One more reason why allowing the user to set pp_dpm_sckl/mclk
>>>shouldn't be  allowed in auto-mode.
>>
>> this is an old logic, maybe ref radeon driver.
>> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>>
>> Best Regards
>> Rex
>> ________________________________
>> From: Kuehling, Felix
>> Sent: Saturday, January 27, 2018 3:32 AM
>> To: Zhu, Rex; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> power_dpm_force_performance_level
>>
>> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>>
>>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>> >    interfaces, which affects existing tools
>>>
>>>
>>> Rex: I don't think the patch will affects existing tools.
>>>
>>>
>>> User set "manual" to power_performance_level, and then change the
>>> clock range through  pp_dpm_sclk/mclk/pcie.
>>>
>>>
>>> with this patch, User dont need to set "manual" command,  if still
>>> receive the manual command, driver just return sucess to user in
>>> order not  break existing
>>>
>>> tools.
>>>
>>
>> Existing tools and users expect that switching back to auto removes
>> the manual clock settings. If you allow changing the clock in auto
>> mode, that won't happen any more.
>>
>>>
>>>  >2. You're taking the clock limits out of the power profile.
>>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>>  >   the compute power profile
>>>
>>>
>>> Rex: In vega10, under default comput mode(with
>>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
>>> performance levels left
>>> (level0 and level7). and clock just switch between lowest and highest.
>>>
>>> I am not sure in this case, driver still can set min sclk/mclk.
>>
>> One more reason why allowing the user to set pp_dpm_sckl/mclk
>> shouldn't be allowed in auto-mode.
>>
>> Regards,
>>   Felix
>>
>>>
>>> Best Regards
>>> Rex
>>>
>>>
>>> ---------------------------------------------------------------------
>>> ---
>>> *From:* Kuehling, Felix
>>> *Sent:* Saturday, January 27, 2018 12:49 AM
>>> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> power_dpm_force_performance_level
>>>
>>> Hi Rex,
>>>
>>> I think I understand what you're trying to do. To summarize my
>>> concerns, there are two reasons I'm against your plan:
>>>
>>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>     interfaces, which affects existing tools
>>>  2. You're taking the clock limits out of the power profile.
>>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>>     the compute power profile
>>>
>>> Regards,
>>>   Felix
>>>
>>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>>> >
>>> > Hi Felix,
>>> >
>>> >
>>> > >That would make sense. But switching to manual mode would disable
>>> > >profiles and automatic profile selection. That was one reason why
>>> > >I objected to your plan to control profile clock limits using
>>> > >these
>>> files.
>>> >
>>> > Rex:
>>> >
>>> >
>>> > I am not very clear the old logic of gfx/compute power profile switch.
>>> >
>>> >
>>> > But with new sysfs,
>>> >
>>> >
>>> >
>>> > The logic is(those sysfs are independent)
>>> >
>>> >  1. configure uphyst/downhyst/min_ativity through
>>> > power_profile_mode,
>>> >
>>> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once
>>> > this sysffs was called, set the dpm level mode to unknown)
>>> >
>>> >       3. adjust power limit through pp_od_power_limit(maybe equal
>>> > to disable power containment).
>>> >
>>> >
>>> >
>>> > In those functions, driver do not check the dpm level mode.
>>> >
>>> > the dpm level mode just used by power_dpm_force_performance_level
>>> > functions.
>>> >
>>> >
>>> > Best Regards
>>> >
>>> > Rex
>>> >
>>> >
>>> >
>>> >
>>> >
>>> > -------------------------------------------------------------------
>>> > -----
>>> > *From:* Kuehling, Felix
>>> > *Sent:* Friday, January 26, 2018 8:26 AM
>>> > *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> > power_dpm_force_performance_level
>>> >
>>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>>> > > I also think about this problem.
>>> > > just think user should unforced clk level through pp dpm
>>> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
>>> > >
>>> > > The logic seems weird, As we supply many sysfs for adjust clock range.
>>> > >
>>> > > We can fix this problem by change current mode to manual mode
>>> > > after user call pp dpm sclk/mclk/pcie.
>>> > >
>>> > > But another think,if user change back the clk range through pp
>>> > > dpm
>>> clk.
>>> > >
>>> > > we are in manual mode, and user set auto mode, in fact, driver
>>> > > change nothing.
>>> >
>>> > With profiles, switching back to auto mode would select the
>>> > appropriate profile, which may have a different clock mask. For
>>> > example for compute we enable only the highest two sclk levels.
>>> >
>>> > >
>>> > > Comparatively speaking, better set manual mode after user call pp
>>> > dpm clk.
>>> >
>>> > That would make sense. But switching to manual mode would disable
>>> > profiles and automatic profile selection. That was one reason why I
>>> > objected to your plan to control profile clock limits using these files.
>>> >
>>> > Regards,
>>> >   Felix
>>> >
>>> > > Thanks very much.
>>> > >
>>> > > Best Regards
>>> > > Rex
>>> > >
>>> ---------------------------------------------------------------------
>>> ---
>>> > > *From:* Kuehling, Felix
>>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>>> > > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
>>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> > > power_dpm_force_performance_level
>>> > >
>>> > > This patch breaks unforcing of clocks, which is currently done by
>>> > > switching back from "manual" to "auto". By removing "manual"
>>> > > mode, you remove the ability to unset forced clocks.
>>> > >
>>> > > Regards,
>>> > >   Felix
>>> > >
>>> > >
>>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>>> > > > Driver do not maintain manual mode for
>>> > > > dpm_force_performance_level, User can set sclk/mclk/pcie range
>>> > > > through
>>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>>> > > > directly.
>>> > > >
>>> > > > In order to not break currently tools, when set "manual" to
>>> > > > power_dpm_force_performance_level driver will do nothing and
>>> > > > just return successful.
>>> > > >
>>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>>> > > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
>>> > > > ---
>>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>>> > +++++++--------
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>>> > > >
>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > index 1812009..66b4df0 100644
>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > @@ -152,7 +152,6 @@ static ssize_t
>>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>>> "auto" :
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>>> "high" :
>>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>>> > > "manual" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>>> > > > @@ -186,7 +185,7 @@ static ssize_t
>>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>>> > > > +             pr_info("No need to set manual mode, Just go
>>> ahead\n");
>>> > > >        } else if (strncmp("profile_exit", buf,
>>> > > strlen("profile_exit")) == 0) {
>>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>>> > > >        } else if (strncmp("profile_standard", buf,
>>> > > strlen("profile_standard")) == 0) {
>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > index ab45232..8ddc978 100644
>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>>> > *handle,
>>> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>>> > > >
>>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                if (!pi->sclk_dpm_key_disabled)
>>> > > > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > index b9aa9f4..3fab686 100644
>>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>>> > > >
>>> > > >  enum amd_dpm_forced_level {
>>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>>> > > >  };
>>> > > >
>>> > > >  enum amd_pm_state_type {
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > index dec8dd9..60d280c 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>>> > > >                enum pp_clock_type type, uint32_t mask)  {
>>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > index 409a56b..eddcbcd 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >
>>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>>> > > >
>>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > index 13db75c..e3a8374 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
>>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  {
>>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>>> > *)(hwmgr->backend);
>>> > > >
>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                if (!data->sclk_dpm_key_disabled) diff --git
>>> > > > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > index 6b28896..828677e 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > @@ -4241,7 +4241,6 @@ static int
>>> > > > vega10_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>>> 1<<sclk_mask);
>>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>>> 1<<mclk_mask);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -4500,11 +4499,6 @@ static int
>>> > > > vega10_force_clock_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  {
>>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>>> > > *)(hwmgr->backend);
>>> > > >
>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>>> > > (ffs(mask) - 1) : 0;
>>> > >
>>> >
>>>
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                             ` <CADnq5_Njpv+OnXRD0bo4ZefjxR8LLnfsyTCoTmdmYzgYAuBXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-01-29 21:51                                               ` Zhu, Rex
       [not found]                                                 ` <CY4PR12MB16876451346CA39EE72B4844FBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Zhu, Rex @ 2018-01-29 21:51 UTC (permalink / raw)
  To: Alex Deucher; +Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW


>I'd prefer to keep the requirement to select manual mode to be able to
>manually mess with the clock levels.  This also makes simplifies the
>profile interface by only allowing you to change the profile if you
>select manual mode first.  That way we don't have to add an AUTO
>profile to the profile selection to let the driver pick the profile.
>that can happen automatically if the user sets the force_performance
>level to auto.


Hi Alex,


So you mean using the "auto" state of force_performance_level as a reset mode for all the performance-related sysfs files?


Currently we just unforce the clock range in the auto state.
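
For reference, this is roughly what the "auto" case does in the hwmgr back ends today (a reduced sketch of the cz_hwmgr path quoted further down; the other forced levels are omitted):

	static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
			enum amd_dpm_forced_level level)
	{
		int ret = 0;

		switch (level) {
		case AMD_DPM_FORCED_LEVEL_AUTO:
			/* only the forced clock levels are reverted here;
			 * profile settings and power limit are left untouched */
			ret = cz_phm_unforce_dpm_levels(hwmgr);
			break;
		default:
			break;
		}

		return ret;
	}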


Anyway, I will drop this patch.


Best Regards

Rex


________________________________
From: Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Sent: Tuesday, January 30, 2018 2:02 AM
To: Zhu, Rex
Cc: Kuehling, Felix; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

On Mon, Jan 29, 2018 at 7:03 AM, Zhu, Rex <Rex.Zhu-5C7GfCeVMHo@public.gmane.org> wrote:
> Hi Alex,
>
>>How about we make the profile selection dependent on selecting the manual force_performance_level
>
> If so, we need to check pm.dpm.forced_level  before "profile heuristics" and "disable power containment" as same as pp_dpm_sclk/mclk/pcie.
>
> if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>                                AMD_DPM_FORCED_LEVEL_LOW |
>                                AMD_DPM_FORCED_LEVEL_HIGH))
>                return -EINVAL;
>
> How about delete judging code before force_clock_level and just change force_level to manual after force_clock_level. Please see the attached patch.
>
> This change have no impact on existing interface and tools.
> It just refine the logic of power_dpm_force_performance_level and pp_dpm_sclk/mclk/pcie.
> Also has no impact to power/computer profile mode on smu7.
>
> The difference is:
>
> New user can have one less command need to input if they want to use pp_dpm_sclk/mclk/pcie.

I'd prefer to keep the requirement to select manual mode to be able to
manually mess with the clock levels.  This also makes simplifies the
profile interface by only allowing you to change the profile if you
select manual mode first.  That way we don't have to add an AUTO
profile to the profile selection to let the driver pick the profile.
that can happen automatically if the user sets the force_performance
level to auto.

Alex

>
>
> Best Regards
> Rex
> -----Original Message-----
> From: Alex Deucher [mailto:alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org]
> Sent: Saturday, January 27, 2018 7:51 AM
> To: Zhu, Rex
> Cc: Kuehling, Felix; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
>
> I think we have two use cases for the profiles:
>
> 1. automatic profile switching for different driver use cases 2. manually tweaking profiles/clocks/power for testing
>
> How about we make the profile selection dependent on selecting the manual force_performance_level and not add an auto to the profile selector.  Then when you select manual you can tweak the clocks and profile heuristics and power containment via their respective knobs.
>
>
> Alex
>
> On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu-5C7GfCeVMHo@public.gmane.org> wrote:
>>>Existing tools and users expect that switching back to auto removes
>>>the manual clock settings. If you allow changing the clock in auto
>>>mode, that won't happen any more.
>>
>>
>> I have sent the patch v2 to fix this problem. user can swith back auto
>> mode and all manual clock setting will be removed.
>>
>>
>>>One more reason why allowing the user to set pp_dpm_sckl/mclk
>>>shouldn't be  allowed in auto-mode.
>>
>> this is an old logic, maybe ref radeon driver.
>> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>>
>> Best Regards
>> Rex
>> ________________________________
>> From: Kuehling, Felix
>> Sent: Saturday, January 27, 2018 3:32 AM
>> To: Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> power_dpm_force_performance_level
>>
>> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>>
>>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>> >    interfaces, which affects existing tools
>>>
>>>
>>> Rex: I don't think the patch will affects existing tools.
>>>
>>>
>>> User set "manual" to power_performance_level, and then change the
>>> clock range through  pp_dpm_sclk/mclk/pcie.
>>>
>>>
>>> with this patch, User dont need to set "manual" command,  if still
>>> receive the manual command, driver just return sucess to user in
>>> order not  break existing
>>>
>>> tools.
>>>
>>
>> Existing tools and users expect that switching back to auto removes
>> the manual clock settings. If you allow changing the clock in auto
>> mode, that won't happen any more.
>>
>>>
>>>  >2. You're taking the clock limits out of the power profile.
>>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>>  >   the compute power profile
>>>
>>>
>>> Rex: In vega10, under default comput mode(with
>>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
>>> performance levels left
>>> (level0 and level7). and clock just switch between lowest and highest.
>>>
>>> I am not sure in this case, driver still can set min sclk/mclk.
>>
>> One more reason why allowing the user to set pp_dpm_sckl/mclk
>> shouldn't be allowed in auto-mode.
>>
>> Regards,
>>   Felix
>>
>>>
>>> Best Regards
>>> Rex
>>>
>>>
>>> ---------------------------------------------------------------------
>>> ---
>>> *From:* Kuehling, Felix
>>> *Sent:* Saturday, January 27, 2018 12:49 AM
>>> *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> power_dpm_force_performance_level
>>>
>>> Hi Rex,
>>>
>>> I think I understand what you're trying to do. To summarize my
>>> concerns, there are two reasons I'm against your plan:
>>>
>>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>     interfaces, which affects existing tools  2. You're taking the
>>> clock limits out of the power profile.
>>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>>     the compute power profile
>>>
>>> Regards,
>>>   Felix
>>>
>>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>>> >
>>> > Hi Felix,
>>> >
>>> >
>>> > >That would make sense. But switching to manual mode would disable
>>> > >profiles and automatic profile selection. That was one reason why
>>> > >I objected to your plan to control profile clock limits using
>>> > >these
>>> files.
>>> >
>>> > Rex:
>>> >
>>> >
>>> > I am not very clear the old logic of gfx/compute power profile switch.
>>> >
>>> >
>>> > But with new sysfs,
>>> >
>>> >
>>> >
>>> > The logic is(those sysfs are independent)
>>> >
>>> >  1. configure uphyst/downhyst/min_ativity through
>>> > power_profile_mode,
>>> >
>>> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once
>>> > this sysffs was called, set the dpm level mode to unknown)
>>> >
>>> >       3. adjust power limit through pp_od_power_limit(maybe equal
>>> > to disable power containment).
>>> >
>>> >
>>> >
>>> > In those functions, driver do not check the dpm level mode.
>>> >
>>> > the dpm level mode just used by power_dpm_force_performance_level
>>> > functions.
>>> >
>>> >
>>> > Best Regards
>>> >
>>> > Rex
>>> >
>>> >
>>> >
>>> >
>>> >
>>> > -------------------------------------------------------------------
>>> > -----
>>> > *From:* Kuehling, Felix
>>> > *Sent:* Friday, January 26, 2018 8:26 AM
>>> > *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> > power_dpm_force_performance_level
>>> >
>>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>>> > > I also think about this problem.
>>> > > just think user should unforced clk level through pp dpm
>>> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
>>> > >
>>> > > The logic seems weird, As we supply many sysfs for adjust clock range.
>>> > >
>>> > > We can fix this problem by change current mode to manual mode
>>> > > after user call pp dpm sclk/mclk/pcie.
>>> > >
>>> > > But another think,if user change back the clk range through pp
>>> > > dpm
>>> clk.
>>> > >
>>> > > we are in manual mode, and user set auto mode, in fact, driver
>>> > > change nothing.
>>> >
>>> > With profiles, switching back to auto mode would select the
>>> > appropriate profile, which may have a different clock mask. For
>>> > example for compute we enable only the highest two sclk levels.
>>> >
>>> > >
>>> > > Comparatively speaking, better set manual mode after user call pp
>>> > dpm clk.
>>> >
>>> > That would make sense. But switching to manual mode would disable
>>> > profiles and automatic profile selection. That was one reason why I
>>> > objected to your plan to control profile clock limits using these files.
>>> >
>>> > Regards,
>>> >   Felix
>>> >
>>> > > Thanks very much.
>>> > >
>>> > > Best Regards
>>> > > Rex
>>> > >
>>> ---------------------------------------------------------------------
>>> ---
>>> > > *From:* Kuehling, Felix
>>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>>> > > *To:* amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org; Zhu, Rex
>>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> > > power_dpm_force_performance_level
>>> > >
>>> > > This patch breaks unforcing of clocks, which is currently done by
>>> > > switching back from "manual" to "auto". By removing "manual"
>>> > > mode, you remove the ability to unset forced clocks.
>>> > >
>>> > > Regards,
>>> > >   Felix
>>> > >
>>> > >
>>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>>> > > > Driver do not maintain manual mode for
>>> > > > dpm_force_performance_level, User can set sclk/mclk/pcie range
>>> > > > through
>>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>>> > > > directly.
>>> > > >
>>> > > > In order to not break currently tools, when set "manual" to
>>> > > > power_dpm_force_performance_level driver will do nothing and
>>> > > > just return successful.
>>> > > >
>>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>>> > > > Signed-off-by: Rex Zhu <Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
>>> > > > ---
>>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>>> > +++++++--------
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>>> > > >
>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > index 1812009..66b4df0 100644
>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> > > > @@ -152,7 +152,6 @@ static ssize_t
>>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>>> "auto" :
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>>> "high" :
>>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>>> > > "manual" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>>> > > >                        (level ==
>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>>> > > > @@ -186,7 +185,7 @@ static ssize_t
>>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>>> > > > +             pr_info("No need to set manual mode, Just go
>>> ahead\n");
>>> > > >        } else if (strncmp("profile_exit", buf,
>>> > > strlen("profile_exit")) == 0) {
>>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>>> > > >        } else if (strncmp("profile_standard", buf,
>>> > > strlen("profile_standard")) == 0) {
>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > index ab45232..8ddc978 100644
>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>>> > *handle,
>>> > > >        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>>> > > >
>>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                if (!pi->sclk_dpm_key_disabled) diff --git
>>> > > > a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > index b9aa9f4..3fab686 100644
>>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>>> > > >
>>> > > >  enum amd_dpm_forced_level {
>>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>>> > > >  };
>>> > > >
>>> > > >  enum amd_pm_state_type {
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > index dec8dd9..60d280c 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>>> > > >                enum pp_clock_type type, uint32_t mask)  {
>>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > index 409a56b..eddcbcd 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >
>>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>>> > > >
>>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > index 13db75c..e3a8374 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
>>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  {
>>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>>> > *)(hwmgr->backend);
>>> > > >
>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                if (!data->sclk_dpm_key_disabled) diff --git
>>> > > > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > index 6b28896..828677e 100644
>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>> > > > @@ -4241,7 +4241,6 @@ static int
>>> > > > vega10_dpm_force_dpm_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>>> 1<<sclk_mask);
>>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>>> 1<<mclk_mask);
>>> > > >                break;
>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>> > > >        default:
>>> > > >                break;
>>> > > > @@ -4500,11 +4499,6 @@ static int
>>> > > > vega10_force_clock_level(struct
>>> > > pp_hwmgr *hwmgr,
>>> > > >  {
>>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>>> > > *)(hwmgr->backend);
>>> > > >
>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>> > > > -             return -EINVAL;
>>> > > > -
>>> > > >        switch (type) {
>>> > > >        case PP_SCLK:
>>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>>> > > (ffs(mask) - 1) : 0;
>>> > >
>>> >
>>>
>>
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                                 ` <CY4PR12MB16876451346CA39EE72B4844FBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2018-01-29 22:26                                                   ` Alex Deucher
       [not found]                                                     ` <CADnq5_O-43_fW4_4D=ztPDhww44fADHFLyHkNCT+WRLv3usQxQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Alex Deucher @ 2018-01-29 22:26 UTC (permalink / raw)
  To: Zhu, Rex; +Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Mon, Jan 29, 2018 at 4:51 PM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>>I'd prefer to keep the requirement to select manual mode to be able to
>>manually mess with the clock levels.  This also makes simplifies the
>>profile interface by only allowing you to change the profile if you
>>select manual mode first.  That way we don't have to add an AUTO
>>profile to the profile selection to let the driver pick the profile.
>>that can happen automatically if the user sets the force_performance
>>level to auto.
>
>
> Hi Alex,
>
>
> So you mean using the "auto" state of force_performance_level as a reset mode
> for all the performance-related sysfs files?

My thinking was that with force_performance_level set to auto, the
driver will handle everything by itself.  The driver would pick the
power profile dynamically, all of the dpm states relevant to the power
profiles would be enabled, etc.  If the user selects manual in
force_performance_level, then they can select which sclk/mclk/pcie dpm
states are active as before, and now with the profile stuff, they can
select either a predefined profile (video, vr, 3d, etc.) or a custom
one.  If they want to go back to automatic driver control, they just
switch force_performance_level back to auto.  That way we have just
one interface to control the behavior and there are fewer corner cases
where features interact.  Either select auto and let the driver handle
it all, or select manual and tweak everything manually.
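
Concretely, the gating could look something like this (illustrative sketch only; the helper name is made up, but the check is in the same spirit as the forced_level tests the patch removes):

	/* Hypothetical helper (not an existing amdgpu function): manual-only
	 * knobs such as pp_dpm_sclk/mclk/pcie and profile selection would
	 * call this before applying user input. */
	static bool amdgpu_dpm_level_is_manual(struct amdgpu_device *adev)
	{
		return adev->pm.dpm.forced_level == AMD_DPM_FORCED_LEVEL_MANUAL;
	}

	/* e.g. near the top of such a sysfs store handler:
	 *
	 *	if (!amdgpu_dpm_level_is_manual(adev))
	 *		return -EINVAL;
	 */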

Alex

>
>
> Currently we just unforce the clock range in the auto state.
>
>
> Anyway, I will drop this patch.
>
>
> Best Regards
>
> Rex
>
>
>
> ________________________________
> From: Alex Deucher <alexdeucher@gmail.com>
> Sent: Tuesday, January 30, 2018 2:02 AM
>
> To: Zhu, Rex
> Cc: Kuehling, Felix; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>
> On Mon, Jan 29, 2018 at 7:03 AM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>> Hi Alex,
>>
>>>How about we make the profile selection dependent on selecting the manual
>>> force_performance_level
>>
>> If so, we need to check pm.dpm.forced_level  before "profile heuristics"
>> and "disable power containment" as same as pp_dpm_sclk/mclk/pcie.
>>
>> if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>                                AMD_DPM_FORCED_LEVEL_LOW |
>>                                AMD_DPM_FORCED_LEVEL_HIGH))
>>                return -EINVAL;
>>
>> How about delete judging code before force_clock_level and just change
>> force_level to manual after force_clock_level. Please see the attached
>> patch.
>>
>> This change have no impact on existing interface and tools.
>> It just refine the logic of power_dpm_force_performance_level and
>> pp_dpm_sclk/mclk/pcie.
>> Also has no impact to power/computer profile mode on smu7.
>>
>> The difference is:
>>
>> New user can have one less command need to input if they want to use
>> pp_dpm_sclk/mclk/pcie.
>
> I'd prefer to keep the requirement to select manual mode to be able to
> manually mess with the clock levels.  This also makes simplifies the
> profile interface by only allowing you to change the profile if you
> select manual mode first.  That way we don't have to add an AUTO
> profile to the profile selection to let the driver pick the profile.
> that can happen automatically if the user sets the force_performance
> level to auto.
>
> Alex
>
>>
>>
>> Best Regards
>> Rex
>> -----Original Message-----
>> From: Alex Deucher [mailto:alexdeucher@gmail.com]
>> Sent: Saturday, January 27, 2018 7:51 AM
>> To: Zhu, Rex
>> Cc: Kuehling, Felix; amd-gfx@lists.freedesktop.org
>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> power_dpm_force_performance_level
>>
>> I think we have two use cases for the profiles:
>>
>> 1. automatic profile switching for different driver use cases 2. manually
>> tweaking profiles/clocks/power for testing
>>
>> How about we make the profile selection dependent on selecting the manual
>> force_performance_level and not add an auto to the profile selector.  Then
>> when you select manual you can tweak the clocks and profile heuristics and
>> power containment via their respective knobs.
>>
>>
>> Alex
>>
>> On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu@amd.com> wrote:
>>>>Existing tools and users expect that switching back to auto removes
>>>>the manual clock settings. If you allow changing the clock in auto
>>>>mode, that won't happen any more.
>>>
>>>
>>> I have sent the patch v2 to fix this problem. user can swith back auto
>>> mode and all manual clock setting will be removed.
>>>
>>>
>>>>One more reason why allowing the user to set pp_dpm_sckl/mclk
>>>>shouldn't be  allowed in auto-mode.
>>>
>>> this is an old logic, maybe ref radeon driver.
>>> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>>>
>>> Best Regards
>>> Rex
>>> ________________________________
>>> From: Kuehling, Felix
>>> Sent: Saturday, January 27, 2018 3:32 AM
>>> To: Zhu, Rex; amd-gfx@lists.freedesktop.org
>>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> power_dpm_force_performance_level
>>>
>>> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>>>
>>>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>> >    interfaces, which affects existing tools
>>>>
>>>>
>>>> Rex: I don't think the patch will affects existing tools.
>>>>
>>>>
>>>> User set "manual" to power_performance_level, and then change the
>>>> clock range through  pp_dpm_sclk/mclk/pcie.
>>>>
>>>>
>>>> with this patch, User dont need to set "manual" command,  if still
>>>> receive the manual command, driver just return sucess to user in
>>>> order not  break existing
>>>>
>>>> tools.
>>>>
>>>
>>> Existing tools and users expect that switching back to auto removes
>>> the manual clock settings. If you allow changing the clock in auto
>>> mode, that won't happen any more.
>>>
>>>>
>>>>  >2. You're taking the clock limits out of the power profile.
>>>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>>>  >   the compute power profile
>>>>
>>>>
>>>> Rex: In vega10, under default comput mode(with
>>>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
>>>> performance levels left
>>>> (level0 and level7). and clock just switch between lowest and highest.
>>>>
>>>> I am not sure in this case, driver still can set min sclk/mclk.
>>>
>>> One more reason why allowing the user to set pp_dpm_sckl/mclk
>>> shouldn't be allowed in auto-mode.
>>>
>>> Regards,
>>>   Felix
>>>
>>>>
>>>> Best Regards
>>>> Rex
>>>>
>>>>
>>>> ---------------------------------------------------------------------
>>>> ---
>>>> *From:* Kuehling, Felix
>>>> *Sent:* Saturday, January 27, 2018 12:49 AM
>>>> *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>>>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> power_dpm_force_performance_level
>>>>
>>>> Hi Rex,
>>>>
>>>> I think I understand what you're trying to do. To summarize my
>>>> concerns, there are two reasons I'm against your plan:
>>>>
>>>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>>     interfaces, which affects existing tools  2. You're taking the
>>>> clock limits out of the power profile.
>>>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>>>     the compute power profile
>>>>
>>>> Regards,
>>>>   Felix
>>>>
>>>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>>>> >
>>>> > Hi Felix,
>>>> >
>>>> >
>>>> > >That would make sense. But switching to manual mode would disable
>>>> > >profiles and automatic profile selection. That was one reason why
>>>> > >I objected to your plan to control profile clock limits using
>>>> > >these
>>>> files.
>>>> >
>>>> > Rex:
>>>> >
>>>> >
>>>> > I am not very clear the old logic of gfx/compute power profile switch.
>>>> >
>>>> >
>>>> > But with new sysfs,
>>>> >
>>>> >
>>>> >
>>>> > The logic is(those sysfs are independent)
>>>> >
>>>> >  1. configure uphyst/downhyst/min_ativity through
>>>> > power_profile_mode,
>>>> >
>>>> >       2. adjust clock range through pp_dpm_sclk/mclk/pcie.(once
>>>> > this sysffs was called, set the dpm level mode to unknown)
>>>> >
>>>> >       3. adjust power limit through pp_od_power_limit(maybe equal
>>>> > to disable power containment).
>>>> >
>>>> >
>>>> >
>>>> > In those functions, driver do not check the dpm level mode.
>>>> >
>>>> > the dpm level mode just used by power_dpm_force_performance_level
>>>> > functions.
>>>> >
>>>> >
>>>> > Best Regards
>>>> >
>>>> > Rex
>>>> >
>>>> >
>>>> >
>>>> >
>>>> >
>>>> > -------------------------------------------------------------------
>>>> > -----
>>>> > *From:* Kuehling, Felix
>>>> > *Sent:* Friday, January 26, 2018 8:26 AM
>>>> > *To:* Zhu, Rex; amd-gfx@lists.freedesktop.org
>>>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> > power_dpm_force_performance_level
>>>> >
>>>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>>>> > > I also think about this problem.
>>>> > > just think user should unforced clk level through pp dpm
>>>> > > sclk/mclk/pcie if they change the clock logic through those sysfs.
>>>> > >
>>>> > > The logic seems weird, As we supply many sysfs for adjust clock
>>>> > > range.
>>>> > >
>>>> > > We can fix this problem by change current mode to manual mode
>>>> > > after user call pp dpm sclk/mclk/pcie.
>>>> > >
>>>> > > But another think,if user change back the clk range through pp
>>>> > > dpm
>>>> clk.
>>>> > >
>>>> > > we are in manual mode, and user set auto mode, in fact, driver
>>>> > > change nothing.
>>>> >
>>>> > With profiles, switching back to auto mode would select the
>>>> > appropriate profile, which may have a different clock mask. For
>>>> > example for compute we enable only the highest two sclk levels.
>>>> >
>>>> > >
>>>> > > Comparatively speaking, better set manual mode after user call pp
>>>> > dpm clk.
>>>> >
>>>> > That would make sense. But switching to manual mode would disable
>>>> > profiles and automatic profile selection. That was one reason why I
>>>> > objected to your plan to control profile clock limits using these
>>>> > files.
>>>> >
>>>> > Regards,
>>>> >   Felix
>>>> >
>>>> > > Thanks very much.
>>>> > >
>>>> > > Best Regards
>>>> > > Rex
>>>> > >
>>>> ---------------------------------------------------------------------
>>>> ---
>>>> > > *From:* Kuehling, Felix
>>>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>>>> > > *To:* amd-gfx@lists.freedesktop.org; Zhu, Rex
>>>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> > > power_dpm_force_performance_level
>>>> > >
>>>> > > This patch breaks unforcing of clocks, which is currently done by
>>>> > > switching back from "manual" to "auto". By removing "manual"
>>>> > > mode, you remove the ability to unset forced clocks.
>>>> > >
>>>> > > Regards,
>>>> > >   Felix
>>>> > >
>>>> > >
>>>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>>>> > > > Driver do not maintain manual mode for
>>>> > > > dpm_force_performance_level, User can set sclk/mclk/pcie range
>>>> > > > through
>>>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>>>> > > > directly.
>>>> > > >
>>>> > > > In order to not break currently tools, when set "manual" to
>>>> > > > power_dpm_force_performance_level driver will do nothing and
>>>> > > > just return successful.
>>>> > > >
>>>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>>>> > > > Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
>>>> > > > ---
>>>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>>>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>>>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>>>> > +++++++--------
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>>>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>>>> > > >
>>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > index 1812009..66b4df0 100644
>>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > @@ -152,7 +152,6 @@ static ssize_t
>>>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>>>> "auto" :
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low"
>>>> > > > :
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>>>> "high" :
>>>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>>>> > > "manual" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>>>> > > > @@ -186,7 +185,7 @@ static ssize_t
>>>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>>>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>>>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>>>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>>>> > > > +             pr_info("No need to set manual mode, Just go
>>>> ahead\n");
>>>> > > >        } else if (strncmp("profile_exit", buf,
>>>> > > strlen("profile_exit")) == 0) {
>>>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>>>> > > >        } else if (strncmp("profile_standard", buf,
>>>> > > strlen("profile_standard")) == 0) {
>>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > index ab45232..8ddc978 100644
>>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>>>> > *handle,
>>>> > > >        struct amdgpu_device *adev = (struct amdgpu_device
>>>> > > > *)handle;
>>>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>>>> > > >
>>>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                if (!pi->sclk_dpm_key_disabled) diff --git
>>>> > > > a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > index b9aa9f4..3fab686 100644
>>>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>>>> > > >
>>>> > > >  enum amd_dpm_forced_level {
>>>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>>>> > > >  };
>>>> > > >
>>>> > > >  enum amd_pm_state_type {
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > index dec8dd9..60d280c 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>>>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>>>> > > >                enum pp_clock_type type, uint32_t mask)  {
>>>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > index 409a56b..eddcbcd 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >
>>>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>>>> > > >
>>>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > index 13db75c..e3a8374 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK,
>>>> > > > 1<<mclk_mask);
>>>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE,
>>>> > > > 1<<pcie_mask);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  {
>>>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>>>> > *)(hwmgr->backend);
>>>> > > >
>>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                if (!data->sclk_dpm_key_disabled) diff --git
>>>> > > > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > index 6b28896..828677e 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > @@ -4241,7 +4241,6 @@ static int
>>>> > > > vega10_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>>>> 1<<sclk_mask);
>>>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>>>> 1<<mclk_mask);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -4500,11 +4499,6 @@ static int
>>>> > > > vega10_force_clock_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  {
>>>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>>>> > > *)(hwmgr->backend);
>>>> > > >
>>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>>>> > > (ffs(mask) - 1) : 0;
>>>> > >
>>>> >
>>>>
>>>
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level
       [not found]                                                     ` <CADnq5_O-43_fW4_4D=ztPDhww44fADHFLyHkNCT+WRLv3usQxQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-01-29 22:45                                                       ` Zhu, Rex
  0 siblings, 0 replies; 16+ messages in thread
From: Zhu, Rex @ 2018-01-29 22:45 UTC (permalink / raw)
  To: Alex Deucher; +Cc: Kuehling, Felix, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW


Hi Alex,


It's OK, I got it.

Thanks.


Best Regards
Rex

________________________________
From: Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Sent: Tuesday, January 30, 2018 6:26 AM
To: Zhu, Rex
Cc: Kuehling, Felix; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level

On Mon, Jan 29, 2018 at 4:51 PM, Zhu, Rex <Rex.Zhu-5C7GfCeVMHo@public.gmane.org> wrote:
>>I'd prefer to keep the requirement to select manual mode to be able to
>>manually mess with the clock levels.  This also makes simplifies the
>>profile interface by only allowing you to change the profile if you
>>select manual mode first.  That way we don't have to add an AUTO
>>profile to the profile selection to let the driver pick the profile.
>>that can happen automatically if the user sets the force_performance
>>level to auto.
>
>
> Hi Alex,
>
>
> So you mean using the "auto" state of force_performance_level as a reset mode
> for all the performance-related sysfs files?

My thinking was that with force_performance_level set to auto, the
driver will handle everything by itself.  The driver would pick the
power profile dynamically, all of the dpm states relevant to the power
profiles would be enabled, etc.  If the user selects manual in
force_performance_level, then they can select which sclk/mclk/pcie dpm
states are active as before, and now with the profile stuff, they can
select either a predefined profile (video, vr, 3d, etc.) or a custom
one.  If they want to go back to automatic driver control, they just
switch force_performance_level back to auto.  That way we have just
one interface to control the behavior and there are fewer corner cases
where features interact.  Either select auto and let the driver handle
it all, or select manual and tweak everything manually.

Alex

>
>
> Currently we just unforce the clock range in the auto state.
>
>
> Anyway, I will drop this patch.
>
>
> Best Regards
>
> Rex
>
>
>
> ________________________________
> From: Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> Sent: Tuesday, January 30, 2018 2:02 AM
>
> To: Zhu, Rex
> Cc: Kuehling, Felix; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
> power_dpm_force_performance_level
>
> On Mon, Jan 29, 2018 at 7:03 AM, Zhu, Rex <Rex.Zhu-5C7GfCeVMHo@public.gmane.org> wrote:
>> Hi Alex,
>>
>>>How about we make the profile selection dependent on selecting the manual
>>> force_performance_level
>>
>> If so, we need to check pm.dpm.forced_level  before "profile heuristics"
>> and "disable power containment" as same as pp_dpm_sclk/mclk/pcie.
>>
>> if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>                                AMD_DPM_FORCED_LEVEL_LOW |
>>                                AMD_DPM_FORCED_LEVEL_HIGH))
>>                return -EINVAL;
>>
>> How about delete judging code before force_clock_level and just change
>> force_level to manual after force_clock_level. Please see the attached
>> patch.
>>
>> This change have no impact on existing interface and tools.
>> It just refine the logic of power_dpm_force_performance_level and
>> pp_dpm_sclk/mclk/pcie.
>> Also has no impact to power/computer profile mode on smu7.
>>
>> The difference is:
>>
>> New user can have one less command need to input if they want to use
>> pp_dpm_sclk/mclk/pcie.
>
> I'd prefer to keep the requirement to select manual mode to be able to
> manually mess with the clock levels.  This also makes simplifies the
> profile interface by only allowing you to change the profile if you
> select manual mode first.  That way we don't have to add an AUTO
> profile to the profile selection to let the driver pick the profile.
> that can happen automatically if the user sets the force_performance
> level to auto.
>
> Alex
>
>>
>>
>> Best Regards
>> Rex
>> -----Original Message-----
>> From: Alex Deucher [mailto:alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org]
>> Sent: Saturday, January 27, 2018 7:51 AM
>> To: Zhu, Rex
>> Cc: Kuehling, Felix; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>> power_dpm_force_performance_level
>>
>> I think we have two use cases for the profiles:
>>
>> 1. automatic profile switching for different driver use cases 2. manually
>> tweaking profiles/clocks/power for testing
>>
>> How about we make the profile selection dependent on selecting the manual
>> force_performance_level and not add an auto to the profile selector.  Then
>> when you select manual you can tweak the clocks and profile heuristics and
>> power containment via their respective knobs.
>>
>>
>> Alex
>>
>> On Fri, Jan 26, 2018 at 3:08 PM, Zhu, Rex <Rex.Zhu-5C7GfCeVMHo@public.gmane.org> wrote:
>>>>Existing tools and users expect that switching back to auto removes
>>>>the manual clock settings. If you allow changing the clock in auto
>>>>mode, that won't happen any more.
>>>
>>>
>>> I have sent the patch v2 to fix this problem. user can swith back auto
>>> mode and all manual clock setting will be removed.
>>>
>>>
>>>>One more reason why allowing the user to set pp_dpm_sckl/mclk
>>>>shouldn't be  allowed in auto-mode.
>>>
>>> this is an old logic, maybe ref radeon driver.
>>> Driver can allow to set pp_dpm_sclk/mclk range in auto/high/low mode.
>>>
>>> Best Regards
>>> Rex
>>> ________________________________
>>> From: Kuehling, Felix
>>> Sent: Saturday, January 27, 2018 3:32 AM
>>> To: Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> Subject: Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>> power_dpm_force_performance_level
>>>
>>> On 2018-01-26 02:20 PM, Zhu, Rex wrote:
>>>>
>>>> >1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>> >    interfaces, which affects existing tools
>>>>
>>>>
>>>> Rex: I don't think the patch will affects existing tools.
>>>>
>>>>
>>>> User set "manual" to power_performance_level, and then change the
>>>> clock range through  pp_dpm_sclk/mclk/pcie.
>>>>
>>>>
>>>> with this patch, User dont need to set "manual" command,  if still
>>>> receive the manual command, driver just return sucess to user in
>>>> order not  break existing
>>>>
>>>> tools.
>>>>
>>>
>>> Existing tools and users expect that switching back to auto removes
>>> the manual clock settings. If you allow changing the clock in auto
>>> mode, that won't happen any more.
>>>
>>>>
>>>>  >2. You're taking the clock limits out of the power profile.
>>>>  >  Automatically adjusting the minimum sclk/mclk is a requirement for
>>>>  >   the compute power profile
>>>>
>>>>
>>>> Rex: In vega10, under default comput mode(with
>>>> busy_set_point/fps/use_rlc_busy/min_active_level set), just two
>>>> performance levels left
>>>> (level0 and level7). and clock just switch between lowest and highest.
>>>>
>>>> I am not sure in this case, driver still can set min sclk/mclk.
>>>
>>> One more reason why allowing the user to set pp_dpm_sckl/mclk
>>> shouldn't be allowed in auto-mode.
>>>
>>> Regards,
>>>   Felix
>>>
>>>>
>>>> Best Regards
>>>> Rex
>>>>
>>>>
>>>> ---------------------------------------------------------------------
>>>> ---
>>>> *From:* Kuehling, Felix
>>>> *Sent:* Saturday, January 27, 2018 12:49 AM
>>>> *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>>> *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> power_dpm_force_performance_level
>>>>
>>>> Hi Rex,
>>>>
>>>> I think I understand what you're trying to do. To summarize my
>>>> concerns, there are two reasons I'm against your plan:
>>>>
>>>>  1. You're breaking the semantics of the existing pp_dpm_sclk/mclk/pcie
>>>>     interfaces, which affects existing tools  2. You're taking the
>>>> clock limits out of the power profile.
>>>>     Automatically adjusting the minimum sclk/mclk is a requirement for
>>>>     the compute power profile
>>>>
>>>> Regards,
>>>>   Felix
>>>>
>>>> On 2018-01-26 07:50 AM, Zhu, Rex wrote:
>>>> >
>>>> > Hi Felix,
>>>> >
>>>> >
>>>> > >That would make sense. But switching to manual mode would disable
>>>> > >profiles and automatic profile selection. That was one reason why
>>>> > >I objected to your plan to control profile clock limits using
>>>> > >these files.
>>>> >
>>>> > Rex:
>>>> >
>>>> >
>>>> > I am not very clear on the old logic of the gfx/compute power profile switch.
>>>> >
>>>> >
>>>> > But with the new sysfs interfaces, the logic is as follows (these
>>>> > sysfs files are independent; see the sketch below):
>>>> >
>>>> >  1. configure uphyst/downhyst/min_active_level through
>>>> >     power_profile_mode,
>>>> >  2. adjust the clock range through pp_dpm_sclk/mclk/pcie (once this
>>>> >     sysfs is written, set the dpm level mode to unknown),
>>>> >  3. adjust the power limit through pp_od_power_limit (maybe equal
>>>> >     to disabling power containment).
>>>> >
>>>> > In those functions, the driver does not check the dpm level mode.
>>>> > The dpm level mode is just used by the power_dpm_force_performance_level
>>>> > functions.
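
A minimal sketch of the three independent knobs described above, using the
file names from this thread; pp_power_profile_mode and pp_od_power_limit were
still proposals at this point, so the paths and the example values below are
placeholders rather than a settled interface:

#include <stdio.h>

/* Sketch only: card0 is assumed, the profile index and power limit are
 * placeholder values, and pp_od_power_limit is the name proposed in this
 * thread, not necessarily a merged interface. */
static void wr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	const char *dev = "/sys/class/drm/card0/device";
	char p[256];

	/* 1. heuristics (up/down hysteresis, min active level) via the profile */
	snprintf(p, sizeof(p), "%s/pp_power_profile_mode", dev);
	wr(p, "1");

	/* 2. clock range; per the description above, this marks the dpm level
	 *    mode as unknown instead of requiring "manual" first */
	snprintf(p, sizeof(p), "%s/pp_dpm_sclk", dev);
	wr(p, "0 1 2");

	/* 3. power limit via the proposed pp_od_power_limit file */
	snprintf(p, sizeof(p), "%s/pp_od_power_limit", dev);
	wr(p, "180");

	return 0;
}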
>>>> >
>>>> >
>>>> > Best Regards
>>>> >
>>>> > Rex
>>>> >
>>>> >
>>>> >
>>>> >
>>>> >
>>>> > -------------------------------------------------------------------
>>>> > -----
>>>> > *From:* Kuehling, Felix
>>>> > *Sent:* Friday, January 26, 2018 8:26 AM
>>>> > *To:* Zhu, Rex; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>>> > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> > power_dpm_force_performance_level
>>>> >
>>>> > On 2018-01-25 07:07 PM, Zhu, Rex wrote:
>>>> > > I also thought about this problem.
>>>> > > I just think users should unforce the clk level through
>>>> > > pp_dpm_sclk/mclk/pcie if they change the clock logic through those
>>>> > > sysfs files.
>>>> > >
>>>> > > The logic seems weird, as we supply many sysfs files for adjusting
>>>> > > the clock range.
>>>> > >
>>>> > > We can fix this problem by changing the current mode to manual mode
>>>> > > after the user calls pp_dpm_sclk/mclk/pcie.
>>>> > >
>>>> > > But another thought: if the user changes the clk range back through
>>>> > > the pp_dpm_* clock files, we are in manual mode, and when the user
>>>> > > then sets auto mode, in fact, the driver changes nothing.
>>>> >
>>>> > With profiles, switching back to auto mode would select the
>>>> > appropriate profile, which may have a different clock mask. For
>>>> > example for compute we enable only the highest two sclk levels.
>>>> >
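
To make the clock-mask arithmetic concrete (an illustration of the mask
handling visible in the hwmgr code quoted further down, not driver code
itself): with eight sclk DPM levels, keeping only the two highest levels
gives mask 0xC0, and ffs(mask) - 1 yields the lowest still-enabled level,
which the driver uses as the boot/min level.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int num_levels = 8;	/* assumed number of sclk DPM levels */
	unsigned int mask = (1u << (num_levels - 1)) | (1u << (num_levels - 2));

	printf("mask = 0x%02X\n", mask);                      /* 0xC0 */
	printf("lowest forced level  = %d\n", ffs(mask) - 1); /* 6 */
	printf("highest forced level = %d\n",
	       31 - __builtin_clz(mask));                     /* 7 */
	return 0;
}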
>>>> > >
>>>> > > Comparatively speaking, it is better to set manual mode after the
>>>> > > user calls the pp_dpm_* clock files.
>>>> >
>>>> > That would make sense. But switching to manual mode would disable
>>>> > profiles and automatic profile selection. That was one reason why I
>>>> > objected to your plan to control profile clock limits using these
>>>> > files.
>>>> >
>>>> > Regards,
>>>> >   Felix
>>>> >
>>>> > > Thanks very much.
>>>> > >
>>>> > > Best Regards
>>>> > > Rex
>>>> > >
>>>> ---------------------------------------------------------------------
>>>> ---
>>>> > > *From:* Kuehling, Felix
>>>> > > *Sent:* Friday, January 26, 2018 12:55:19 AM
>>>> > > *To:* amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org; Zhu, Rex
>>>> > > *Subject:* Re: [PATCH 1/2] drm/amd/pp: Remove manual mode for
>>>> > > power_dpm_force_performance_level
>>>> > >
>>>> > > This patch breaks unforcing of clocks, which is currently done by
>>>> > > switching back from "manual" to "auto". By removing "manual"
>>>> > > mode, you remove the ability to unset forced clocks.
>>>> > >
>>>> > > Regards,
>>>> > >   Felix
>>>> > >
>>>> > >
>>>> > > On 2018-01-25 06:26 AM, Rex Zhu wrote:
>>>> > > > Driver do not maintain manual mode for
>>>> > > > dpm_force_performance_level, User can set sclk/mclk/pcie range
>>>> > > > through
>>>> > > pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie
>>>> > > > directly.
>>>> > > >
>>>> > > > In order to not break currently tools, when set "manual" to
>>>> > > > power_dpm_force_performance_level driver will do nothing and
>>>> > > > just return successful.
>>>> > > >
>>>> > > > Change-Id: Iaf672b9abc7fa57b765ceb7fa2fba6ad3e80c50b
>>>> > > > Signed-off-by: Rex Zhu <Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
>>>> > > > ---
>>>> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3 +--
>>>> > > >  drivers/gpu/drm/amd/amdgpu/ci_dpm.c                |  5 -----
>>>> > > >  drivers/gpu/drm/amd/include/kgd_pp_interface.h     | 15
>>>> > +++++++--------
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c     |  4 ----
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c     |  1 -
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   |  6 ------
>>>> > > >  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c |  6 ------
>>>> > > >  7 files changed, 8 insertions(+), 32 deletions(-)
>>>> > > >
>>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > index 1812009..66b4df0 100644
>>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>>> > > > @@ -152,7 +152,6 @@ static ssize_t
>>>> > > amdgpu_get_dpm_forced_performance_level(struct device *dev,
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ?
>>>> "auto" :
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low"
>>>> > > > :
>>>> > > >                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ?
>>>> "high" :
>>>> > > > -                     (level == AMD_DPM_FORCED_LEVEL_MANUAL) ?
>>>> > > "manual" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
>>>> > > >                        (level ==
>>>> > > AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
>>>> > > > @@ -186,7 +185,7 @@ static ssize_t
>>>> > > amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>>> > > >        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
>>>> > > >                level = AMD_DPM_FORCED_LEVEL_AUTO;
>>>> > > >        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
>>>> > > > -             level = AMD_DPM_FORCED_LEVEL_MANUAL;
>>>> > > > +             pr_info("No need to set manual mode, Just go
>>>> ahead\n");
>>>> > > >        } else if (strncmp("profile_exit", buf,
>>>> > > strlen("profile_exit")) == 0) {
>>>> > > >                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
>>>> > > >        } else if (strncmp("profile_standard", buf,
>>>> > > strlen("profile_standard")) == 0) {
>>>> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > index ab45232..8ddc978 100644
>>>> > > > --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
>>>> > > > @@ -6639,11 +6639,6 @@ static int ci_dpm_force_clock_level(void
>>>> > *handle,
>>>> > > >        struct amdgpu_device *adev = (struct amdgpu_device
>>>> > > > *)handle;
>>>> > > >        struct ci_power_info *pi = ci_get_pi(adev);
>>>> > > >
>>>> > > > -     if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                if (!pi->sclk_dpm_key_disabled) diff --git
>>>> > > > a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > index b9aa9f4..3fab686 100644
>>>> > > > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
>>>> > > > @@ -41,14 +41,13 @@ struct amd_vce_state {
>>>> > > >
>>>> > > >  enum amd_dpm_forced_level {
>>>> > > >        AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_LOW = 0x4,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
>>>> > > > -     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_LOW = 0x2,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_HIGH = 0x4,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x8,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x10,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x20,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x40,
>>>> > > > +     AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x80,
>>>> > > >  };
>>>> > > >
>>>> > > >  enum amd_pm_state_type {
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > index dec8dd9..60d280c 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
>>>> > > > @@ -1250,7 +1250,6 @@ static int cz_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >        case AMD_DPM_FORCED_LEVEL_AUTO:
>>>> > > >                ret = cz_phm_unforce_dpm_levels(hwmgr);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -1558,9 +1557,6 @@ static int cz_get_dal_power_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
>>>> > > >                enum pp_clock_type type, uint32_t mask)  {
>>>> > > > -     if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                smum_send_msg_to_smc_with_parameter(hwmgr,
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > index 409a56b..eddcbcd 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
>>>> > > > @@ -605,7 +605,6 @@ static int rv_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >
>>>> > > PPSMC_MSG_SetSoftMaxFclkByFreq,
>>>> > > >
>>>> > > RAVEN_UMD_PSTATE_MIN_FCLK);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > index 13db75c..e3a8374 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
>>>> > > > @@ -2798,7 +2798,6 @@ static int smu7_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >                smu7_force_clock_level(hwmgr, PP_MCLK,
>>>> > > > 1<<mclk_mask);
>>>> > > >                smu7_force_clock_level(hwmgr, PP_PCIE,
>>>> > > > 1<<pcie_mask);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -4311,11 +4310,6 @@ static int smu7_force_clock_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  {
>>>> > > >        struct smu7_hwmgr *data = (struct smu7_hwmgr
>>>> > *)(hwmgr->backend);
>>>> > > >
>>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                                     AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                                     AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                if (!data->sclk_dpm_key_disabled) diff --git
>>>> > > > a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > index 6b28896..828677e 100644
>>>> > > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
>>>> > > > @@ -4241,7 +4241,6 @@ static int
>>>> > > > vega10_dpm_force_dpm_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >                vega10_force_clock_level(hwmgr, PP_SCLK,
>>>> 1<<sclk_mask);
>>>> > > >                vega10_force_clock_level(hwmgr, PP_MCLK,
>>>> 1<<mclk_mask);
>>>> > > >                break;
>>>> > > > -     case AMD_DPM_FORCED_LEVEL_MANUAL:
>>>> > > >        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
>>>> > > >        default:
>>>> > > >                break;
>>>> > > > @@ -4500,11 +4499,6 @@ static int
>>>> > > > vega10_force_clock_level(struct
>>>> > > pp_hwmgr *hwmgr,
>>>> > > >  {
>>>> > > >        struct vega10_hwmgr *data = (struct vega10_hwmgr
>>>> > > *)(hwmgr->backend);
>>>> > > >
>>>> > > > -     if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_LOW |
>>>> > > > -                             AMD_DPM_FORCED_LEVEL_HIGH))
>>>> > > > -             return -EINVAL;
>>>> > > > -
>>>> > > >        switch (type) {
>>>> > > >        case PP_SCLK:
>>>> > > >                data->smc_state_table.gfx_boot_level = mask ?
>>>> > > (ffs(mask) - 1) : 0;
>>>> > >
>>>> >
>>>>
>>>
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>

[-- Attachment #1.2: Type: text/html, Size: 39998 bytes --]

[-- Attachment #2: Type: text/plain, Size: 154 bytes --]

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

end of thread  [newest: ~2018-01-29 22:45 UTC]

Thread overview: 16+ messages
2018-01-25 11:26 [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level Rex Zhu
     [not found] ` <1516879614-11533-1-git-send-email-Rex.Zhu-5C7GfCeVMHo@public.gmane.org>
2018-01-25 11:26   ` [PATCH 2/2] drm/amd/pp: Fix sysfs pp_dpm_pcie bug on CI/VI Rex Zhu
2018-01-25 16:55   ` [PATCH 1/2] drm/amd/pp: Remove manual mode for power_dpm_force_performance_level Felix Kuehling
     [not found]     ` <51c6111b-78ec-36f8-b5e0-4a23ccea6de4-5C7GfCeVMHo@public.gmane.org>
2018-01-26  0:07       ` Zhu, Rex
     [not found]         ` <CY4PR12MB1687930CD8F44390C3791A63FBE10-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-26  0:26           ` Felix Kuehling
     [not found]             ` <0ce63372-dc11-9710-f11d-0cf6abf326b4-5C7GfCeVMHo@public.gmane.org>
2018-01-26 12:50               ` Zhu, Rex
     [not found]                 ` <CY4PR12MB168744ABA067C470390EA34EFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-26 16:49                   ` Felix Kuehling
     [not found]                     ` <ec6ea7dd-0096-dd92-8c49-b2992b5bf506-5C7GfCeVMHo@public.gmane.org>
2018-01-26 19:20                       ` Zhu, Rex
     [not found]                         ` <CY4PR12MB1687BFBCA906C0B17D52089BFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-26 19:32                           ` Felix Kuehling
     [not found]                             ` <cc9e6d84-9720-15fb-15ec-f608f8d9392d-5C7GfCeVMHo@public.gmane.org>
2018-01-26 20:08                               ` Zhu, Rex
     [not found]                                 ` <CY4PR12MB1687274014BDD739BE44DF6CFBE00-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-26 23:51                                   ` Alex Deucher
     [not found]                                     ` <CADnq5_Ni6j8ONe7f5rDMprbeB6Mq1RVXJAonUO2VTp+1Dgf+Gw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-01-29 12:03                                       ` Zhu, Rex
     [not found]                                         ` <CY4PR12MB1687A63BA8F717170700292DFBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-29 18:02                                           ` Alex Deucher
     [not found]                                             ` <CADnq5_Njpv+OnXRD0bo4ZefjxR8LLnfsyTCoTmdmYzgYAuBXOA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-01-29 21:51                                               ` Zhu, Rex
     [not found]                                                 ` <CY4PR12MB16876451346CA39EE72B4844FBE50-rpdhrqHFk06Y0SjTqZDccQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-01-29 22:26                                                   ` Alex Deucher
     [not found]                                                     ` <CADnq5_O-43_fW4_4D=ztPDhww44fADHFLyHkNCT+WRLv3usQxQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-01-29 22:45                                                       ` Zhu, Rex
