* [PATCH 1/2] drm/amd/pm: correct gpu metrics related data structures
From: Evan Quan @ 2021-02-22  4:03 UTC
  To: amd-gfx; +Cc: Alexander.Deucher, Evan Quan

To make sure they are naturally aligned.

Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/include/kgd_pp_interface.h        | 11 ++++++-----
 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h                |  4 ++--
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c |  8 ++++----
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |  8 ++++----
 drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c        |  8 ++++----
 5 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 828513412e20..3a8f64e1a10c 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -332,9 +332,9 @@ struct amd_pm_funcs {
 };
 
 struct metrics_table_header {
-	uint16_t			structure_size;
-	uint8_t				format_revision;
-	uint8_t				content_revision;
+	uint32_t			structure_size;
+	uint16_t			format_revision;
+	uint16_t			content_revision;
 };
 
 struct gpu_metrics_v1_0 {
@@ -385,8 +385,9 @@ struct gpu_metrics_v1_0 {
 	uint16_t			current_fan_speed;
 
 	/* Link width/speed */
-	uint8_t				pcie_link_width;
-	uint8_t				pcie_link_speed; // in 0.1 GT/s
+	uint16_t			pcie_link_width;
+	uint16_t			pcie_link_speed; // in 0.1 GT/s
+	uint8_t				padding[2];
 };
 
 struct gpu_metrics_v2_0 {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 50dd1529b994..f4e7a330f67f 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -284,11 +284,11 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
 
 int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
 
-int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
+uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
 
 int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
 
-int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
+uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
 
 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
 			      bool enablement);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index c0753029a8e2..95e905d8418d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -52,8 +52,8 @@
 
 #define LINK_WIDTH_MAX				6
 #define LINK_SPEED_MAX				3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static uint16_t link_speed[] = {25, 50, 80, 160};
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask);
@@ -2117,7 +2117,7 @@ static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
 		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
 }
 
-static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+static uint16_t vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
 {
 	uint32_t width_level;
 
@@ -2137,7 +2137,7 @@ static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
 		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
 }
 
-static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+static uint16_t vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
 {
 	uint32_t speed_level;
 
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 87811b005b85..3d462405b572 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -57,8 +57,8 @@
 
 #define LINK_WIDTH_MAX				6
 #define LINK_SPEED_MAX				3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static uint16_t link_speed[] = {25, 50, 80, 160};
 
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
@@ -3279,7 +3279,7 @@ static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
 		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
 }
 
-static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+static uint16_t vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
 {
 	uint32_t width_level;
 
@@ -3299,7 +3299,7 @@ static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
 		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
 }
 
-static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+static uint16_t vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
 {
 	uint32_t speed_level;
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 60ef63073ad4..86af9832ba9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -99,8 +99,8 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
 #define mmCG_THERMAL_STATUS_ARCT		0x90
 #define mmCG_THERMAL_STATUS_ARCT_BASE_IDX	0
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static uint16_t link_speed[] = {25, 50, 80, 160};
 
 int smu_v11_0_init_microcode(struct smu_context *smu)
 {
@@ -2134,7 +2134,7 @@ int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
 		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
 }
 
-int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
+uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
 {
 	uint32_t width_level;
 
@@ -2154,7 +2154,7 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
 		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
 }
 
-int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
+uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
 {
 	uint32_t speed_level;
 
-- 
2.29.0
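
A note on "naturally aligned": every member sits at an offset that is a
multiple of its own size, so the compiler inserts no hidden padding and
the layout is identical across ABIs. A minimal standalone sketch of what
the header change above does to sizes and offsets (illustrative only,
not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Header layout before this patch. */
struct header_old {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* Header layout after this patch. */
struct header_new {
	uint32_t structure_size;
	uint16_t format_revision;
	uint16_t content_revision;
};

int main(void)
{
	/* Both are dense, but the header grows from 4 to 8 bytes,
	 * shifting every gpu_metrics_v1_0 field that follows it. */
	static_assert(sizeof(struct header_old) == 4, "old header");
	static_assert(sizeof(struct header_new) == 8, "new header");
	static_assert(offsetof(struct header_new, format_revision) == 4,
		      "16-bit member on a 16-bit boundary");
	return 0;
}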


* [PATCH 2/2] drm/amd/pm: optimize the link width/speed retrieving
From: Evan Quan @ 2021-02-22  4:03 UTC
  To: amd-gfx; +Cc: Alexander.Deucher, Evan Quan

By using the information provided by PMFW when available.

Change-Id: I1afec4cd07ac9608861ee07c449e320e3f36a932
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 17 ++++++++++----
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   | 22 +++++++++++++++----
 2 files changed, 31 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 29e04f33f276..7fe2876c99fe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -72,6 +72,8 @@
 
 #define SMU_11_0_GFX_BUSY_THRESHOLD 15
 
+static uint16_t link_speed[] = {25, 50, 80, 160};
+
 static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
 	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
@@ -2391,10 +2393,17 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
 
-	gpu_metrics->pcie_link_width =
-			smu_v11_0_get_current_pcie_link_width(smu);
-	gpu_metrics->pcie_link_speed =
-			smu_v11_0_get_current_pcie_link_speed(smu);
+	if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
+	      ((adev->asic_type == CHIP_NAVI12) && smu_version > 0x00341C00) ||
+	      ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) {
+		gpu_metrics->pcie_link_width = (uint16_t)metrics.PcieWidth;
+		gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];
+	} else {
+		gpu_metrics->pcie_link_width =
+				smu_v11_0_get_current_pcie_link_width(smu);
+		gpu_metrics->pcie_link_speed =
+				smu_v11_0_get_current_pcie_link_speed(smu);
+	}
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index e74299da1739..6fd95764c952 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -73,6 +73,8 @@
 
 #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
 
+static uint16_t link_speed[] = {25, 50, 80, 160};
+
 static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
 	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
@@ -3124,6 +3126,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	SmuMetricsExternal_t metrics_external;
 	SmuMetrics_t *metrics =
 		&(metrics_external.SmuMetrics);
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t smu_version;
 	int ret = 0;
 
 	ret = smu_cmn_get_metrics_table(smu,
@@ -3170,10 +3174,20 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
 
-	gpu_metrics->pcie_link_width =
-			smu_v11_0_get_current_pcie_link_width(smu);
-	gpu_metrics->pcie_link_speed =
-			smu_v11_0_get_current_pcie_link_speed(smu);
+	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return ret;
+
+	if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu_version > 0x003A1E00) ||
+	      ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu_version > 0x00410400)) {
+		gpu_metrics->pcie_link_width = (uint16_t)metrics->PcieWidth;
+		gpu_metrics->pcie_link_speed = link_speed[metrics->PcieRate];
+	} else {
+		gpu_metrics->pcie_link_width =
+				smu_v11_0_get_current_pcie_link_width(smu);
+		gpu_metrics->pcie_link_speed =
+				smu_v11_0_get_current_pcie_link_speed(smu);
+	}
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
-- 
2.29.0
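
The smu_version cutoffs in the hunks above (0x002A3B00 and friends) are
packed firmware versions, compared as plain integers. A small decoder
sketch; the major.minor.patch byte layout is assumed from how amdgpu
logs SMU firmware versions and is not something this patch depends on:

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: major in bits 23:16, minor in bits 15:8, patch in
 * bits 7:0. The patch itself only relies on the values being
 * monotonically comparable with '>'. */
static void print_smu_version(const char *label, uint32_t v)
{
	printf("%s: SMU firmware %u.%u.%u (raw 0x%08X)\n", label,
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff, v);
}

int main(void)
{
	print_smu_version("navi10 cutoff", 0x002A3B00);         /* 42.59.0 */
	print_smu_version("sienna cichlid cutoff", 0x003A1E00); /* 58.30.0 */
	return 0;
}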


* Re: [PATCH 1/2] drm/amd/pm: correct gpu metrics related data structures
From: Alex Deucher @ 2021-02-22 21:45 UTC
  To: Evan Quan; +Cc: Deucher, Alexander, amd-gfx list

On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <evan.quan@amd.com> wrote:
>
> To make sure they are naturally aligned.
>
> Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Won't this break existing apps that query this info?  We need to make
sure umr and rocm-smi can handle this.
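
To make the concern concrete: a consumer compiled against the old
4-byte header misreads every field of a new-layout blob. A hypothetical
reader (not umr's or rocm-smi's actual code) showing the failure mode:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Header layout an existing binary would have been built against. */
struct old_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* What an old binary sees when handed a new-layout table. */
static int old_reader_format_rev(const void *blob, size_t len)
{
	struct old_header h;

	if (len < sizeof(h))
		return -1;
	memcpy(&h, blob, sizeof(h));
	/* The new layout stores a 32-bit structure_size first, so on a
	 * little-endian host this byte is the size's third byte (zero),
	 * not the real format_revision, and every field after the
	 * header is shifted by four bytes on top of that. */
	return h.format_revision;
}

int main(void)
{
	/* An 8-byte new-layout header: size=120, format=1, content=0. */
	uint8_t blob[8] = { 120, 0, 0, 0, 1, 0, 0, 0 };

	return old_reader_format_rev(blob, sizeof(blob)); /* 0, not 1 */
}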

Alex



* Re: [PATCH 2/2] drm/amd/pm: optimize the link width/speed retrieving
From: Alex Deucher @ 2021-02-22 21:47 UTC
  To: Evan Quan; +Cc: Deucher, Alexander, amd-gfx list

On Sun, Feb 21, 2021 at 11:04 PM Evan Quan <evan.quan@amd.com> wrote:
>
> By using the information provided by PMFW when available.
>
> Change-Id: I1afec4cd07ac9608861ee07c449e320e3f36a932
> Signed-off-by: Evan Quan <evan.quan@amd.com>

What about arcturus?
Acked-by: Alex Deucher <alexander.deucher@amd.com>


* RE: [PATCH 2/2] drm/amd/pm: optimize the link width/speed retrieving
From: Quan, Evan @ 2021-02-23  2:03 UTC
  To: Alex Deucher; +Cc: Deucher, Alexander, amd-gfx list

The Arcturus PMFW does not expose that information to us.
So, for Arcturus, we have to stick with the current implementation (smu_v11_0_get_current_pcie_link_width/speed).
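
For reference, that fallback boils down to a table lookup on the level
read back from the PCIe link control registers. A condensed sketch
(the register read is elided, and the bounds clamp is a defensive
assumption rather than code copied from the driver):

#include <stdint.h>

#define LINK_WIDTH_MAX	6
#define LINK_SPEED_MAX	3

static const uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const uint16_t link_speed[] = {25, 50, 80, 160}; /* 0.1 GT/s */

/* 'level' is the LC_LINK_WIDTH_RD / LC_CURRENT_DATA_RATE field already
 * extracted from the control register, as in the *_level() helpers. */
uint16_t width_from_level(uint32_t level)
{
	return link_width[level > LINK_WIDTH_MAX ? LINK_WIDTH_MAX : level];
}

uint16_t speed_from_level(uint32_t level)
{
	return link_speed[level > LINK_SPEED_MAX ? LINK_SPEED_MAX : level];
}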

Regards
Evan
-----Original Message-----
From: Alex Deucher <alexdeucher@gmail.com> 
Sent: Tuesday, February 23, 2021 5:48 AM
To: Quan, Evan <Evan.Quan@amd.com>
Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>; Deucher, Alexander <Alexander.Deucher@amd.com>
Subject: Re: [PATCH 2/2] drm/amd/pm: optimize the link width/speed retrieving

On Sun, Feb 21, 2021 at 11:04 PM Evan Quan <evan.quan@amd.com> wrote:
>
> By using the information provided by PMFW when available.
>
> Change-Id: I1afec4cd07ac9608861ee07c449e320e3f36a932
> Signed-off-by: Evan Quan <evan.quan@amd.com>

What about arcturus?
Acked-by: Alex Deucher <alexander.deucher@amd.com>



* Re: [PATCH 1/2] drm/amd/pm: correct gpu metrics related data structures
From: Tom St Denis @ 2021-02-23 11:48 UTC
  To: Alex Deucher; +Cc: Deucher, Alexander, Evan Quan, amd-gfx list


This is why I advocated for the sysfs output to be either standard packed
or serialized. It was a hack as it is anyway.
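
As a purely hypothetical sketch of what "serialized" could mean here
(nothing like this exists in the driver today): a tagged,
self-describing layout lets readers skip unknown fields instead of
depending on fixed struct offsets.

#include <stdint.h>

/* Hypothetical serialized metrics stream: tag/size/payload records
 * instead of one fixed C struct. A reader that does not recognize a
 * tag advances by 'size', so adding or widening a field never shifts
 * the fields around it. */
struct metrics_record {
	uint16_t tag;		/* e.g. an identifier for pcie_link_speed */
	uint16_t size;		/* payload size in bytes */
	uint8_t  payload[];	/* 'size' bytes follow */
};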

On Mon, Feb 22, 2021 at 4:46 PM Alex Deucher <alexdeucher@gmail.com> wrote:

> On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <evan.quan@amd.com> wrote:
> >
> > To make sure they are naturally aligned.
> >
> > Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
>
> Won't this break existing apps that query this info?  We need to make
> sure umr and rocm-smi can handle this.
>
> Alex
