* [PATCH 1/2] drm/amdgpu/pm: Update metrics table
@ 2021-05-14 21:01 David M Nieto
  2021-05-14 21:01 ` [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
  0 siblings, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-14 21:01 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Expand the metrics table with voltages and frequency ranges.
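
The table remains self-describing through metrics_table_header, so an
existing reader can detect the new revision before touching the added
fields. A minimal userspace sketch of that check (illustrative only:
the sysfs path and the local copy of the header struct are assumptions,
not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative copy of the header layout from kgd_pp_interface.h. */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

int main(void)
{
	/* Path is an assumption; amdgpu exposes the metrics table as a
	 * binary sysfs attribute under the device directory. */
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
	unsigned char buf[4096];
	struct metrics_table_header hdr;
	size_t n;

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < sizeof(hdr))
		return 1;
	memcpy(&hdr, buf, sizeof(hdr));
	/* Only a table reporting v1.3 (or later) carries the new voltage
	 * and min/max frequency fields added below. */
	printf("gpu_metrics v%u.%u, %u bytes\n",
	       (unsigned)hdr.format_revision,
	       (unsigned)hdr.content_revision,
	       (unsigned)hdr.structure_size);
	return 0;
}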

Signed-off-by: David M Nieto <david.nieto@amd.com>
Change-Id: I2a8d63d0abf613a616518c1d7caf9f5da693e920
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    | 99 +++++++++++++++++++
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c        |  3 +
 2 files changed, 102 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2d13131a432..7e2b22a0c41c 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -536,6 +536,105 @@ struct gpu_metrics_v1_2 {
 	uint64_t			firmware_timestamp;
 };
 
+struct gpu_metrics_v1_3 {
+	struct metrics_table_header	common_header;
+
+	/* Temperature */
+	uint16_t			temperature_edge;
+	uint16_t			temperature_hotspot;
+	uint16_t			temperature_mem;
+	uint16_t			temperature_vrgfx;
+	uint16_t			temperature_vrsoc;
+	uint16_t			temperature_vrmem;
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_umc_activity; // memory controller
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power;
+	uint64_t			energy_accumulator;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_vclk0_frequency;
+	uint16_t			average_dclk0_frequency;
+	uint16_t			average_vclk1_frequency;
+	uint16_t			average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_vclk0;
+	uint16_t			current_dclk0;
+	uint16_t			current_vclk1;
+	uint16_t			current_dclk1;
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			current_fan_speed;
+
+	/* Link width/speed */
+	uint16_t			pcie_link_width;
+	uint16_t			pcie_link_speed; // in 0.1 GT/s
+
+	uint16_t			padding;
+
+	uint32_t			gfx_activity_acc;
+	uint32_t			mem_activity_acc;
+
+	uint16_t			temperature_hbm[NUM_HBM_INSTANCES];
+
+	/* PMFW attached timestamp (10ns resolution) */
+	uint64_t			firmware_timestamp;
+
+	/* Voltage (mV) */
+	uint16_t			voltage_soc;
+	uint16_t			voltage_gfx;
+	uint16_t			voltage_mem;
+
+	/* DPM levels */
+	uint8_t				max_gfxclk_dpm;
+	uint16_t			max_gfxclk_frequency;
+	uint16_t			min_gfxclk_frequency;
+
+	uint8_t				max_socclk_dpm;
+	uint16_t			max_socclk_frequency;
+	uint16_t			min_socclk_frequency;
+
+	uint8_t				max_uclk_dpm;
+	uint16_t			max_uclk_frequency;
+	uint16_t			min_uclk_frequency;
+
+	uint8_t				max_vclk0_dpm;
+	uint16_t			max_vclk0_frequency;
+	uint16_t			min_vclk0_frequency;
+
+	uint8_t				max_dclk0_dpm;
+	uint16_t			max_dclk0_frequency;
+	uint16_t			min_dclk0_frequency;
+
+	uint8_t				max_vclk1_dpm;
+	uint16_t			max_vclk1_frequency;
+	uint16_t			min_vclk1_frequency;
+
+	uint8_t				max_dclk1_dpm;
+	uint16_t			max_dclk1_frequency;
+	uint16_t			min_dclk1_frequency;
+
+	/* Power Limit */
+	uint16_t			max_socket_power;
+};
+
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0934e5b3aa17..0ceb7329838c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(1, 2):
 		structure_size = sizeof(struct gpu_metrics_v1_2);
 		break;
+	case METRICS_VERSION(1, 3):
+		structure_size = sizeof(struct gpu_metrics_v1_3);
+		break;
 	case METRICS_VERSION(2, 0):
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
-- 
2.17.1


* [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-14 21:01 [PATCH 1/2] drm/amdgpu/pm: Update metrics table David M Nieto
@ 2021-05-14 21:01 ` David M Nieto
  2021-05-17  6:28   ` Lazar, Lijo
  0 siblings, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-14 21:01 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Fill in the voltage and frequency range fields.
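
The voltage fields are derived from the offsets reported by the SMU;
the expression used below, (155000 - 625 * offset) / 100, amounts to a
1.55 V base minus 6.25 mV per offset step, reported in mV. A small
standalone sketch of that arithmetic (the helper name is illustrative,
not part of this patch):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the expression used in the patch: subtract 6.25 mV per
 * offset step from a 1.55 V base and report the result in mV. */
static uint16_t offset_to_mv(int offset)
{
	return (155000 - 625 * offset) / 100;
}

int main(void)
{
	/* offset 0 -> 1550 mV, offset 80 -> 1050 mV, offset 160 -> 550 mV */
	printf("%u %u %u\n",
	       (unsigned)offset_to_mv(0),
	       (unsigned)offset_to_mv(80),
	       (unsigned)offset_to_mv(160));
	return 0;
}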

Signed-off-by: David M Nieto <david.nieto@amd.com>
Change-Id: I07f926dea46e80a96e1c972ba9dbc804b812d503
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 434 +++++++++++++++++-
 1 file changed, 417 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..a412fa9a95ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -2627,10 +2627,11 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2646,7 +2647,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,19 +2682,119 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2709,7 +2810,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,19 +2847,119 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2774,7 +2975,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,19 +3015,119 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2842,7 +3143,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +3185,108 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
-- 
2.17.1


* RE: [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-14 21:01 ` [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
@ 2021-05-17  6:28   ` Lazar, Lijo
  2021-05-17 20:06     ` Nieto, David M
  2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  0 siblings, 2 replies; 19+ messages in thread
From: Lazar, Lijo @ 2021-05-17  6:28 UTC (permalink / raw)
  To: Nieto, David M, amd-gfx; +Cc: Nieto, David M

[AMD Public Use]

The metrics table carries dynamic state information for the ASIC. There are other pp_* nodes which carry static information about the supported min/max values and levels, and that is a one-time query. Why is there a need to put everything in the metrics data?
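
For reference, the static limits are already reachable with a one-time
text read of the pp_* nodes. A minimal sketch (the path and the usual
"<index>: <freq>Mhz [*]" line format are assumptions for illustration,
not something this patch defines):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_sclk", "r");
	char line[128];

	if (!f)
		return 1;
	/* One line per supported DPM level; the first and last lines
	 * give the min/max of the range. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}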

Thanks,
Lijo

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of David M Nieto
Sent: Saturday, May 15, 2021 2:32 AM
To: amd-gfx@lists.freedesktop.org
Cc: Nieto, David M <David.Nieto@amd.com>
Subject: [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x

Fill in the voltage and frequency range fields.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Change-Id: I07f926dea46e80a96e1c972ba9dbc804b812d503
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 434 +++++++++++++++++-
 1 file changed, 417 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..a412fa9a95ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -2627,10 +2627,11 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2646,7 +2647,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,19 +2682,119 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2709,7 +2810,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,19 +2847,119 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2774,7 +2975,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,19 +3015,119 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2842,7 +3143,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +3185,108 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+	gpu_metrics->max_socket_power = smu->power_limit;
+
+	/* Frequency and DPM ranges */
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+	if (ret)
+		goto out;
+	gpu_metrics->min_dclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+	if (ret)
+		goto out;
+	gpu_metrics->max_gfxclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+			gpu_metrics->max_gfxclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_gfxclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+			gpu_metrics->max_socclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_socclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+			gpu_metrics->max_uclk_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_uclk_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+			gpu_metrics->max_vclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_vclk0_frequency = freq;
+
+	ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_dpm = dpm;
+
+	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+			gpu_metrics->max_dclk0_dpm - 1, &freq);
+	if (ret)
+		goto out;
+
+	gpu_metrics->max_dclk0_frequency = freq;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
--
2.17.1


* Re: [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-17  6:28   ` Lazar, Lijo
@ 2021-05-17 20:06     ` Nieto, David M
  2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  1 sibling, 0 replies; 19+ messages in thread
From: Nieto, David M @ 2021-05-17 20:06 UTC (permalink / raw)
  To: Lazar, Lijo, amd-gfx


[AMD Public Use]

I don't think the pp_* nodes expose the vclk/dclk values, but it might be better to rework this patch to expose those instead, and just add the voltages...
________________________________
From: Lazar, Lijo <Lijo.Lazar@amd.com>
Sent: Sunday, May 16, 2021 11:28 PM
To: Nieto, David M <David.Nieto@amd.com>; amd-gfx@lists.freedesktop.org <amd-gfx@lists.freedesktop.org>
Cc: Nieto, David M <David.Nieto@amd.com>
Subject: RE: [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x

[AMD Public Use]

The metrics table carries dynamic state information for the ASIC. There are other pp_* nodes which carry static information about the supported min/max values and levels, and that is a one-time query. Why is there a need to put everything in the metrics data?

Thanks,
Lijo

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of David M Nieto
Sent: Saturday, May 15, 2021 2:32 AM
To: amd-gfx@lists.freedesktop.org
Cc: Nieto, David M <David.Nieto@amd.com>
Subject: [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x

Fill in the voltage and frequency range fields.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Change-Id: I07f926dea46e80a96e1c972ba9dbc804b812d503
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 434 +++++++++++++++++-
 1 file changed, 417 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..a412fa9a95ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
                 goto err0_out;
         smu_table->metrics_time = 0;

-       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
         smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
         if (!smu_table->gpu_metrics_table)
                 goto err1_out;
@@ -2627,10 +2627,11 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
                                              void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
         SmuMetrics_legacy_t metrics;
         int ret = 0;
+       int freq = 0, dpm = 0;

         mutex_lock(&smu->metrics_lock);

@@ -2646,7 +2647,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,

         mutex_unlock(&smu->metrics_lock);

-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

         gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,19 +2682,119 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,

         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

+       gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+       gpu_metrics->max_socket_power = smu->power_limit;
+
+       /* Frequency and DPM ranges */
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_dclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+       if (ret)
+               goto out;
+       gpu_metrics->max_gfxclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+                       gpu_metrics->max_gfxclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+                       gpu_metrics->max_socclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+                       gpu_metrics->max_uclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+                       gpu_metrics->max_vclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+                       gpu_metrics->max_dclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_frequency = freq;
+
         *table = (void *)gpu_metrics;

-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
+out:
+       return ret;
 }

 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
                                       void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
         SmuMetrics_t metrics;
         int ret = 0;
+       int freq = 0, dpm = 0;

         mutex_lock(&smu->metrics_lock);

@@ -2709,7 +2810,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,

         mutex_unlock(&smu->metrics_lock);

-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

         gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,19 +2847,119 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,

         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

+       gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+       gpu_metrics->max_socket_power = smu->power_limit;
+
+       /* Frequency and DPM ranges */
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_dclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+       if (ret)
+               goto out;
+       gpu_metrics->max_gfxclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+                       gpu_metrics->max_gfxclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+                       gpu_metrics->max_socclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+                       gpu_metrics->max_uclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+                       gpu_metrics->max_vclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+                       gpu_metrics->max_dclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_frequency = freq;
+
         *table = (void *)gpu_metrics;

-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
+out:
+       return ret;
 }

 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
                                              void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
         SmuMetrics_NV12_legacy_t metrics;
         int ret = 0;
+       int freq = 0, dpm = 0;

         mutex_lock(&smu->metrics_lock);

@@ -2774,7 +2975,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,

         mutex_unlock(&smu->metrics_lock);

-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

         gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,19 +3015,119 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,

         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

+       gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+       gpu_metrics->max_socket_power = smu->power_limit;
+
+       /* Frequency and DPM ranges */
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_dclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+       if (ret)
+               goto out;
+       gpu_metrics->max_gfxclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+                       gpu_metrics->max_gfxclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+                       gpu_metrics->max_socclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+                       gpu_metrics->max_uclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+                       gpu_metrics->max_vclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+                       gpu_metrics->max_dclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_frequency = freq;
+
         *table = (void *)gpu_metrics;

-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
+out:
+       return ret;
 }

 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
                                       void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
         SmuMetrics_NV12_t metrics;
         int ret = 0;
+       int freq = 0, dpm = 0;

         mutex_lock(&smu->metrics_lock);

@@ -2842,7 +3143,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,

         mutex_unlock(&smu->metrics_lock);

-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

         gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +3185,108 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,

         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

+       gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
+       gpu_metrics->max_socket_power = smu->power_limit;
+
+       /* Frequency and DPM ranges */
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK, 0, &freq);
+       if (ret)
+               goto out;
+       gpu_metrics->min_dclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_GFXCLK, &dpm);
+       if (ret)
+               goto out;
+       gpu_metrics->max_gfxclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_GFXCLK,
+                       gpu_metrics->max_gfxclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_gfxclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_SOCCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_SOCCLK,
+                       gpu_metrics->max_socclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_socclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK,
+                       gpu_metrics->max_uclk_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_uclk_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_VCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_VCLK,
+                       gpu_metrics->max_vclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_vclk0_frequency = freq;
+
+       ret = smu_v11_0_get_dpm_level_count(smu, SMU_DCLK, &dpm);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_dpm = dpm;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_DCLK,
+                       gpu_metrics->max_dclk0_dpm - 1, &freq);
+       if (ret)
+               goto out;
+
+       gpu_metrics->max_dclk0_frequency = freq;
+
         *table = (void *)gpu_metrics;

-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
+out:
+       return ret;
 }

 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
--
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH 1/3] drm/amdgpu/pm: Update metrics table
  2021-05-17  6:28   ` Lazar, Lijo
  2021-05-17 20:06     ` Nieto, David M
@ 2021-05-18  4:09     ` David M Nieto
  2021-05-18  4:09       ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
  2021-05-18  4:09       ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
  1 sibling, 2 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-18  4:09 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

expand metrics table with voltages and frequency ranges

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    | 69 +++++++++++++++++++
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c        |  3 +
 2 files changed, 72 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2d13131a432..b1cd52a9d684 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
 	uint64_t			firmware_timestamp;
 };
 
+struct gpu_metrics_v1_3 {
+	struct metrics_table_header	common_header;
+
+	/* Temperature */
+	uint16_t			temperature_edge;
+	uint16_t			temperature_hotspot;
+	uint16_t			temperature_mem;
+	uint16_t			temperature_vrgfx;
+	uint16_t			temperature_vrsoc;
+	uint16_t			temperature_vrmem;
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_umc_activity; // memory controller
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power;
+	uint64_t			energy_accumulator;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_vclk0_frequency;
+	uint16_t			average_dclk0_frequency;
+	uint16_t			average_vclk1_frequency;
+	uint16_t			average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_vclk0;
+	uint16_t			current_dclk0;
+	uint16_t			current_vclk1;
+	uint16_t			current_dclk1;
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			current_fan_speed;
+
+	/* Link width/speed */
+	uint16_t			pcie_link_width;
+	uint16_t			pcie_link_speed; // in 0.1 GT/s
+
+	uint16_t			padding;
+
+	uint32_t			gfx_activity_acc;
+	uint32_t			mem_activity_acc;
+
+	uint16_t			temperature_hbm[NUM_HBM_INSTANCES];
+
+	/* PMFW attached timestamp (10ns resolution) */
+	uint64_t			firmware_timestamp;
+
+	/* Voltage (mV) */
+	uint16_t			voltage_soc;
+	uint16_t			voltage_gfx;
+	uint16_t			voltage_mem;
+
+	uint16_t			padding1;
+};
+
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0934e5b3aa17..0ceb7329838c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(1, 2):
 		structure_size = sizeof(struct gpu_metrics_v1_2);
 		break;
+	case METRICS_VERSION(1, 3):
+		structure_size = sizeof(struct gpu_metrics_v1_3);
+		break;
 	case METRICS_VERSION(2, 0):
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
@ 2021-05-18  4:09       ` David M Nieto
  2021-05-19  5:35         ` Lijo Lazar
  2021-05-18  4:09       ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
  1 sibling, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-18  4:09 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Fill voltage fields in metrics table

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 62 ++++++++++++++-----
 1 file changed, 45 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..9339fd24ae8c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -2627,10 +2627,11 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2646,7 +2647,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,19 +2682,26 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2709,7 +2717,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,19 +2754,26 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_legacy_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2774,7 +2789,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,19 +2829,26 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_t metrics;
 	int ret = 0;
+	int freq = 0, dpm = 0;
 
 	mutex_lock(&smu->metrics_lock);
 
@@ -2842,7 +2864,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +2906,15 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm
  2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  2021-05-18  4:09       ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
@ 2021-05-18  4:09       ` David M Nieto
  2021-05-19  5:43         ` Lijo Lazar
  1 sibling, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-18  4:09 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Enable displaying DPM levels for VCN clocks
in swsmu supported ASICs

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 ++++++++++++++++++
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  4 ++
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  8 ++++
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 38 +++++++++++++++
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 48 +++++++++++++++++++
 5 files changed, 144 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 77693bf0840c..1735a96dd307 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
 				now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
 	case SMU_PCIE:
 		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
 		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 9339fd24ae8c..2e801f2e42a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 	case SMU_DCEFCLK:
 		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
@@ -1444,6 +1446,8 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		/* There is only 2 levels for fine grained DPM */
 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0c40a54c46d7..6da6d08d8858 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 	case SMU_DCEFCLK:
 		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
@@ -1150,6 +1154,10 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 		/* There is only 2 levels for fine grained DPM */
 		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index f43b4c623685..3a6b52b7b647 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
 	CLK_MAP(UCLK, CLOCK_FCLK),
 	CLK_MAP(MCLK, CLOCK_FCLK),
+	CLK_MAP(VCLK, CLOCK_VCLK),
+	CLK_MAP(DCLK, CLOCK_DCLK),
 };
 
 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
 			return -EINVAL;
 		*freq = clk_table->FClocks[dpm_level].Freq;
 		break;
+	case SMU_VCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->VClocks[dpm_level].Freq;
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->DClocks[dpm_level].Freq;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -296,6 +309,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
 		case SMU_UCLK:
 		case SMU_FCLK:
 		case SMU_MCLK:
+		case SMU_VCLK:
+		case SMU_DCLK:
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
 			if (ret)
 				goto failed;
@@ -324,6 +339,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
 		case SMU_UCLK:
 		case SMU_FCLK:
 		case SMU_MCLK:
+		case SMU_DCLK:
+		case SMU_VCLK:
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);
 			if (ret)
 				goto failed;
@@ -532,6 +549,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		count = NUM_FCLK_DPM_LEVELS;
 		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
+	case SMU_VCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+		break;
+	case SMU_DCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+		break;
 	default:
 		break;
 	}
@@ -543,6 +568,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
 			if (ret)
@@ -730,6 +757,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
 		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
 	}
 
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+	}
+
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7c191a5d6db9..bc628326776c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 								       now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
 	default:
 		break;
 	}
@@ -920,6 +966,8 @@ static int aldebaran_force_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_SOCCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		/*
 		 * Should not arrive here since aldebaran does not
 		 * support mclk/socclk/fclk softmin/softmax settings
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-18  4:09       ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
@ 2021-05-19  5:35         ` Lijo Lazar
  0 siblings, 0 replies; 19+ messages in thread
From: Lijo Lazar @ 2021-05-19  5:35 UTC (permalink / raw)
  To: David M Nieto, amd-gfx



On 5/18/2021 9:39 AM, David M Nieto wrote:
> Fill voltage fields in metrics table
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 62 ++++++++++++++-----
>   1 file changed, 45 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index ac13042672ea..9339fd24ae8c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
>   		goto err0_out;
>   	smu_table->metrics_time = 0;
>   
> -	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
> +	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
>   	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
>   	if (!smu_table->gpu_metrics_table)
>   		goto err1_out;
> @@ -2627,10 +2627,11 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   					     void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_legacy_t metrics;
>   	int ret = 0;
> +	int freq = 0, dpm = 0;

These variables are added, but they appear to be unused in the new code.

>   	mutex_lock(&smu->metrics_lock);
>   
> @@ -2646,7 +2647,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2681,19 +2682,26 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +

It's better to add a non-zero check for the offset values. A zero offset is
unlikely to be a real reading, so without the check the driver could report
a wrong voltage when the FW is not passing the data.
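
A minimal sketch of how such a guard could look, assuming the offset
fields read back as 0 when the PMFW does not report them (placement and
the exact condition are illustrative only, not part of the patch):

	/* only convert to mV when the PMFW actually reported an offset */
	if (metrics.CurrGfxVoltageOffset)
		gpu_metrics->voltage_gfx =
			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
	if (metrics.CurrMemVidOffset)
		gpu_metrics->voltage_mem =
			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
	if (metrics.CurrSocVoltageOffset)
		gpu_metrics->voltage_soc =
			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;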

The same comment applies to the functions below as well.

Thanks,
Lijo

>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   				      void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_t metrics;
>   	int ret = 0;
> +	int freq = 0, dpm = 0;
>   
>   	mutex_lock(&smu->metrics_lock);
>   
> @@ -2709,7 +2717,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2746,19 +2754,26 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   					     void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_NV12_legacy_t metrics;
>   	int ret = 0;
> +	int freq = 0, dpm = 0;
>   
>   	mutex_lock(&smu->metrics_lock);
>   
> @@ -2774,7 +2789,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2814,19 +2829,26 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   				      void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_NV12_t metrics;
>   	int ret = 0;
> +	int freq = 0, dpm = 0;
>   
>   	mutex_lock(&smu->metrics_lock);
>   
> @@ -2842,7 +2864,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2884,9 +2906,15 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
> 

-- 
Thanks,
Lijo
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm
  2021-05-18  4:09       ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
@ 2021-05-19  5:43         ` Lijo Lazar
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
                             ` (2 more replies)
  0 siblings, 3 replies; 19+ messages in thread
From: Lijo Lazar @ 2021-05-19  5:43 UTC (permalink / raw)
  To: David M Nieto, amd-gfx



On 5/18/2021 9:39 AM, David M Nieto wrote:
> Enable displaying DPM levels for VCN clocks
> in swsmu supported ASICs
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 ++++++++++++++++++
>   .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  4 ++
>   .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  8 ++++
>   .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 38 +++++++++++++++
>   .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 48 +++++++++++++++++++
>   5 files changed, 144 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index 77693bf0840c..1735a96dd307 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
>   				now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
>   	case SMU_PCIE:
>   		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
>   		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 9339fd24ae8c..2e801f2e42a9 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   	case SMU_DCEFCLK:
>   		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> @@ -1444,6 +1446,8 @@ static int navi10_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:

This hunk is about forcing clock levels, but the commit message only
mentions displaying them. Skip this, or modify the commit message accordingly.

>   		/* There is only 2 levels for fine grained DPM */
>   		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
>   			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 0c40a54c46d7..6da6d08d8858 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   	case SMU_DCEFCLK:
>   		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> @@ -1150,6 +1154,10 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   		/* There is only 2 levels for fine grained DPM */
>   		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
>   			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> index f43b4c623685..3a6b52b7b647 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> @@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
>   	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
>   	CLK_MAP(UCLK, CLOCK_FCLK),
>   	CLK_MAP(MCLK, CLOCK_FCLK),
> +	CLK_MAP(VCLK, CLOCK_VCLK),
> +	CLK_MAP(DCLK, CLOCK_DCLK),
>   };
>   
>   static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
> @@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
>   			return -EINVAL;
>   		*freq = clk_table->FClocks[dpm_level].Freq;
>   		break;
> +	case SMU_VCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->VClocks[dpm_level].Freq;
> +		break;
> +	case SMU_DCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->DClocks[dpm_level].Freq;
> +		break;
> +
>   	default:
>   		return -EINVAL;
>   	}
> @@ -296,6 +309,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
>   		case SMU_UCLK:
>   		case SMU_FCLK:
>   		case SMU_MCLK:
> +		case SMU_VCLK:
> +		case SMU_DCLK:
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);

Please double check if this is the right place for V/D clocks. mclk_mask 
says this is related to memory clock values only.

>   			if (ret)
>   				goto failed;
> @@ -324,6 +339,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
>   		case SMU_UCLK:
>   		case SMU_FCLK:
>   		case SMU_MCLK:
> +		case SMU_DCLK:
> +		case SMU_VCLK:
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);

Please double check if this is the right place for V/D clocks. 
NUM_MEMCLK_DPM_LEVELS says this is related to memory clock values only.
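
One possible shape for a dedicated VCN path, purely as a sketch - whether
the VClocks/DClocks tables are ordered the same way as the memory clock
table is an assumption here, so the index may need to be adjusted:

	case SMU_VCLK:
	case SMU_DCLK:
		/* sketch only: use the VCN level count instead of the
		 * memory-clock count; table ordering is assumed to match
		 * the memory clock tables
		 */
		ret = renoir_get_dpm_clk_limited(smu, clk_type,
						 NUM_VCN_DPM_LEVELS - 1, min);
		if (ret)
			goto failed;
		break;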

>   			if (ret)
>   				goto failed;
> @@ -532,6 +549,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   		count = NUM_FCLK_DPM_LEVELS;
>   		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
>   		break;
> +	case SMU_VCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
> +		break;
> +	case SMU_DCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
> +		break;
>   	default:
>   		break;
>   	}
> @@ -543,6 +568,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_DCEFCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   		for (i = 0; i < count; i++) {
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
>   			if (ret)
> @@ -730,6 +757,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
>   		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
>   	}
>   
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
> +		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
> +	}
> +
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
> +		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
> +	}
> +
> +
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index 7c191a5d6db9..bc628326776c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
>   								       now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
>   	default:
>   		break;
>   	}
> @@ -920,6 +966,8 @@ static int aldebaran_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_SOCCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:

V/D Clock forcing is not applicable on Aldebaran. No need to add 
anything here.

>   		/*
>   		 * Should not arrive here since aldebaran does not
>   		 * support mclk/socclk/fclk softmin/softmax settings
> 

-- 
Thanks,
Lijo
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 1/3] drm/amdgpu/pm: Update metrics table
  2021-05-19  5:43         ` Lijo Lazar
@ 2021-05-19  6:02           ` David M Nieto
  2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
                               ` (2 more replies)
  2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
  2021-05-20  5:16           ` [PATCH] drm/amdgpu/pm: display vcn pp dpm (v4) David M Nieto
  2 siblings, 3 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-19  6:02 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

expand metrics table with voltages and frequency ranges

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    | 69 +++++++++++++++++++
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c        |  3 +
 2 files changed, 72 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2d13131a432..b1cd52a9d684 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
 	uint64_t			firmware_timestamp;
 };
 
+struct gpu_metrics_v1_3 {
+	struct metrics_table_header	common_header;
+
+	/* Temperature */
+	uint16_t			temperature_edge;
+	uint16_t			temperature_hotspot;
+	uint16_t			temperature_mem;
+	uint16_t			temperature_vrgfx;
+	uint16_t			temperature_vrsoc;
+	uint16_t			temperature_vrmem;
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_umc_activity; // memory controller
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power;
+	uint64_t			energy_accumulator;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_vclk0_frequency;
+	uint16_t			average_dclk0_frequency;
+	uint16_t			average_vclk1_frequency;
+	uint16_t			average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_vclk0;
+	uint16_t			current_dclk0;
+	uint16_t			current_vclk1;
+	uint16_t			current_dclk1;
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			current_fan_speed;
+
+	/* Link width/speed */
+	uint16_t			pcie_link_width;
+	uint16_t			pcie_link_speed; // in 0.1 GT/s
+
+	uint16_t			padding;
+
+	uint32_t			gfx_activity_acc;
+	uint32_t			mem_activity_acc;
+
+	uint16_t			temperature_hbm[NUM_HBM_INSTANCES];
+
+	/* PMFW attached timestamp (10ns resolution) */
+	uint64_t			firmware_timestamp;
+
+	/* Voltage (mV) */
+	uint16_t			voltage_soc;
+	uint16_t			voltage_gfx;
+	uint16_t			voltage_mem;
+
+	uint16_t			padding1;
+};
+
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0934e5b3aa17..0ceb7329838c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(1, 2):
 		structure_size = sizeof(struct gpu_metrics_v1_2);
 		break;
+	case METRICS_VERSION(1, 3):
+		structure_size = sizeof(struct gpu_metrics_v1_3);
+		break;
 	case METRICS_VERSION(2, 0):
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
@ 2021-05-19  6:02             ` David M Nieto
  2021-05-19 15:43               ` Lijo Lazar
  2021-05-19  6:02             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
  2021-05-19 15:42             ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table Lijo Lazar
  2 siblings, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-19  6:02 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Fill voltage fields in metrics table

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 58 +++++++++++++------
 1 file changed, 41 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..b8971303a873 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -2627,8 +2627,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_legacy_t metrics;
 	int ret = 0;
 
@@ -2646,7 +2646,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,17 +2681,23 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_t metrics;
 	int ret = 0;
 
@@ -2709,7 +2715,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,17 +2752,23 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_legacy_t metrics;
 	int ret = 0;
 
@@ -2774,7 +2786,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,17 +2826,23 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_t metrics;
 	int ret = 0;
 
@@ -2842,7 +2860,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +2902,15 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
@ 2021-05-19  6:02             ` David M Nieto
  2021-05-19 15:44               ` Lijo Lazar
  2021-05-19 15:42             ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table Lijo Lazar
  2 siblings, 1 reply; 19+ messages in thread
From: David M Nieto @ 2021-05-19  6:02 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Enable displaying DPM levels for VCN clocks
in swsmu supported ASICs

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 +++++++++++++++++++
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  2 +
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  8 ++++
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 34 ++++++++++++++
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 46 +++++++++++++++++++
 5 files changed, 136 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 77693bf0840c..1735a96dd307 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
 				now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
 	case SMU_PCIE:
 		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
 		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index b8971303a873..7763de464678 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 	case SMU_DCEFCLK:
 		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0c40a54c46d7..6da6d08d8858 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 	case SMU_DCEFCLK:
 		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
@@ -1150,6 +1154,10 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 		/* There is only 2 levels for fine grained DPM */
 		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index f43b4c623685..0805dc439572 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
 	CLK_MAP(UCLK, CLOCK_FCLK),
 	CLK_MAP(MCLK, CLOCK_FCLK),
+	CLK_MAP(VCLK, CLOCK_VCLK),
+	CLK_MAP(DCLK, CLOCK_DCLK),
 };
 
 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
 			return -EINVAL;
 		*freq = clk_table->FClocks[dpm_level].Freq;
 		break;
+	case SMU_VCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->VClocks[dpm_level].Freq;
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->DClocks[dpm_level].Freq;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		count = NUM_FCLK_DPM_LEVELS;
 		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
+	case SMU_VCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+		break;
+	case SMU_DCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+		break;
 	default:
 		break;
 	}
@@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
 			if (ret)
@@ -730,6 +753,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
 		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
 	}
 
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+	}
+
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7c191a5d6db9..fb744f3e17d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 								       now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
 	default:
 		break;
 	}
-- 
2.17.1


* Re: [PATCH 1/3] drm/amdgpu/pm: Update metrics table
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
  2021-05-19  6:02             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
@ 2021-05-19 15:42             ` Lijo Lazar
  2 siblings, 0 replies; 19+ messages in thread
From: Lijo Lazar @ 2021-05-19 15:42 UTC (permalink / raw)
  To: David M Nieto, amd-gfx

Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>


On 5/19/2021 11:32 AM, David M Nieto wrote:
> expand metrics table with voltages and frequency ranges
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/include/kgd_pp_interface.h    | 69 +++++++++++++++++++
>   drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c        |  3 +
>   2 files changed, 72 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> index e2d13131a432..b1cd52a9d684 100644
> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> @@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
>   	uint64_t			firmware_timestamp;
>   };
>   
> +struct gpu_metrics_v1_3 {
> +	struct metrics_table_header	common_header;
> +
> +	/* Temperature */
> +	uint16_t			temperature_edge;
> +	uint16_t			temperature_hotspot;
> +	uint16_t			temperature_mem;
> +	uint16_t			temperature_vrgfx;
> +	uint16_t			temperature_vrsoc;
> +	uint16_t			temperature_vrmem;
> +
> +	/* Utilization */
> +	uint16_t			average_gfx_activity;
> +	uint16_t			average_umc_activity; // memory controller
> +	uint16_t			average_mm_activity; // UVD or VCN
> +
> +	/* Power/Energy */
> +	uint16_t			average_socket_power;
> +	uint64_t			energy_accumulator;
> +
> +	/* Driver attached timestamp (in ns) */
> +	uint64_t			system_clock_counter;
> +
> +	/* Average clocks */
> +	uint16_t			average_gfxclk_frequency;
> +	uint16_t			average_socclk_frequency;
> +	uint16_t			average_uclk_frequency;
> +	uint16_t			average_vclk0_frequency;
> +	uint16_t			average_dclk0_frequency;
> +	uint16_t			average_vclk1_frequency;
> +	uint16_t			average_dclk1_frequency;
> +
> +	/* Current clocks */
> +	uint16_t			current_gfxclk;
> +	uint16_t			current_socclk;
> +	uint16_t			current_uclk;
> +	uint16_t			current_vclk0;
> +	uint16_t			current_dclk0;
> +	uint16_t			current_vclk1;
> +	uint16_t			current_dclk1;
> +
> +	/* Throttle status */
> +	uint32_t			throttle_status;
> +
> +	/* Fans */
> +	uint16_t			current_fan_speed;
> +
> +	/* Link width/speed */
> +	uint16_t			pcie_link_width;
> +	uint16_t			pcie_link_speed; // in 0.1 GT/s
> +
> +	uint16_t			padding;
> +
> +	uint32_t			gfx_activity_acc;
> +	uint32_t			mem_activity_acc;
> +
> +	uint16_t			temperature_hbm[NUM_HBM_INSTANCES];
> +
> +	/* PMFW attached timestamp (10ns resolution) */
> +	uint64_t			firmware_timestamp;
> +
> +	/* Voltage (mV) */
> +	uint16_t			voltage_soc;
> +	uint16_t			voltage_gfx;
> +	uint16_t			voltage_mem;
> +
> +	uint16_t			padding1;
> +};
> +
>   /*
>    * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
>    * Use gpu_metrics_v2_1 or later instead.
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> index 0934e5b3aa17..0ceb7329838c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> @@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
>   	case METRICS_VERSION(1, 2):
>   		structure_size = sizeof(struct gpu_metrics_v1_2);
>   		break;
> +	case METRICS_VERSION(1, 3):
> +		structure_size = sizeof(struct gpu_metrics_v1_3);
> +		break;
>   	case METRICS_VERSION(2, 0):
>   		structure_size = sizeof(struct gpu_metrics_v2_0);
>   		break;
> 

-- 
Thanks,
Lijo

* Re: [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x
  2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
@ 2021-05-19 15:43               ` Lijo Lazar
  0 siblings, 0 replies; 19+ messages in thread
From: Lijo Lazar @ 2021-05-19 15:43 UTC (permalink / raw)
  To: David M Nieto, amd-gfx

Add a check for non-zero offsets so that it doesn't report a static voltage
of 1.55 V all the time. With that addressed, the patch is

Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
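
For illustration, a minimal sketch of the guard being asked for here; the
helper name is hypothetical, while the 1.55 V base and 6.25 mV step are
taken from the conversion used in the patch below:

/* The PMFW reports rail voltage as an offset below a 1.55 V base in
 * 6.25 mV steps; a zero (unreported) offset would otherwise decode to a
 * constant 1550 mV, which is the static reading noted above.
 */
static int decode_voltage_offset_mv(int offset)
{
	if (!offset)
		return 0;	/* leave the metrics field at its default */

	return (155000 - 625 * offset) / 100;	/* millivolts */
}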

On 5/19/2021 11:32 AM, David M Nieto wrote:
> Fill voltage fields in metrics table
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 58 +++++++++++++------
>   1 file changed, 41 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index ac13042672ea..b8971303a873 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
>   		goto err0_out;
>   	smu_table->metrics_time = 0;
>   
> -	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
> +	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
>   	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
>   	if (!smu_table->gpu_metrics_table)
>   		goto err1_out;
> @@ -2627,8 +2627,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   					     void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_legacy_t metrics;
>   	int ret = 0;
>   
> @@ -2646,7 +2646,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2681,17 +2681,23 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   				      void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_t metrics;
>   	int ret = 0;
>   
> @@ -2709,7 +2715,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2746,17 +2752,23 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   					     void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_NV12_legacy_t metrics;
>   	int ret = 0;
>   
> @@ -2774,7 +2786,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2814,17 +2826,23 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   				      void **table)
>   {
>   	struct smu_table_context *smu_table = &smu->smu_table;
> -	struct gpu_metrics_v1_1 *gpu_metrics =
> -		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
> +	struct gpu_metrics_v1_3 *gpu_metrics =
> +		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>   	SmuMetrics_NV12_t metrics;
>   	int ret = 0;
>   
> @@ -2842,7 +2860,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   
>   	mutex_unlock(&smu->metrics_lock);
>   
> -	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
> +	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>   
>   	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
>   	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> @@ -2884,9 +2902,15 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
>   
>   	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>   
> +	gpu_metrics->voltage_gfx = (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
> +	gpu_metrics->voltage_mem = (155000 - 625 * metrics.CurrMemVidOffset) / 100;
> +	gpu_metrics->voltage_soc = (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
> +
>   	*table = (void *)gpu_metrics;
>   
> -	return sizeof(struct gpu_metrics_v1_1);
> +	return sizeof(struct gpu_metrics_v1_3);
> +out:
> +	return ret;
>   }
>   
>   static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
> 

-- 
Thanks,
Lijo

* Re: [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm
  2021-05-19  6:02             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
@ 2021-05-19 15:44               ` Lijo Lazar
  0 siblings, 0 replies; 19+ messages in thread
From: Lijo Lazar @ 2021-05-19 15:44 UTC (permalink / raw)
  To: David M Nieto, amd-gfx


Avoid changes to sienna_cichlid_force_clk_levels as well.
With that addressed, the patch is

Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>

On 5/19/2021 11:32 AM, David M Nieto wrote:
> Enable displaying DPM levels for VCN clocks
> in swsmu supported ASICs
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 +++++++++++++++++++
>   .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  2 +
>   .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  8 ++++
>   .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 34 ++++++++++++++
>   .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 46 +++++++++++++++++++
>   5 files changed, 136 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index 77693bf0840c..1735a96dd307 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
>   				now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
>   	case SMU_PCIE:
>   		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
>   		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index b8971303a873..7763de464678 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   	case SMU_DCEFCLK:
>   		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 0c40a54c46d7..6da6d08d8858 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   	case SMU_DCEFCLK:
>   		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> @@ -1150,6 +1154,10 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   		/* There is only 2 levels for fine grained DPM */
>   		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
>   			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> index f43b4c623685..0805dc439572 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> @@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
>   	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
>   	CLK_MAP(UCLK, CLOCK_FCLK),
>   	CLK_MAP(MCLK, CLOCK_FCLK),
> +	CLK_MAP(VCLK, CLOCK_VCLK),
> +	CLK_MAP(DCLK, CLOCK_DCLK),
>   };
>   
>   static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
> @@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
>   			return -EINVAL;
>   		*freq = clk_table->FClocks[dpm_level].Freq;
>   		break;
> +	case SMU_VCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->VClocks[dpm_level].Freq;
> +		break;
> +	case SMU_DCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->DClocks[dpm_level].Freq;
> +		break;
> +
>   	default:
>   		return -EINVAL;
>   	}
> @@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   		count = NUM_FCLK_DPM_LEVELS;
>   		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
>   		break;
> +	case SMU_VCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
> +		break;
> +	case SMU_DCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
> +		break;
>   	default:
>   		break;
>   	}
> @@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_DCEFCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   		for (i = 0; i < count; i++) {
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
>   			if (ret)
> @@ -730,6 +753,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
>   		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
>   	}
>   
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
> +		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
> +	}
> +
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
> +		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
> +	}
> +
> +
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index 7c191a5d6db9..fb744f3e17d7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
>   								       now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
>   	default:
>   		break;
>   	}
> 

-- 
Thanks,
Lijo

* [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2)
  2021-05-19  5:43         ` Lijo Lazar
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
@ 2021-05-19 17:39           ` David M Nieto
  2021-05-19 17:39             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x (v3) David M Nieto
  2021-05-19 17:39             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm (v3) David M Nieto
  2021-05-20  5:16           ` [PATCH] drm/amdgpu/pm: display vcn pp dpm (v4) David M Nieto
  2 siblings, 2 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-19 17:39 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

v2: removed static dpm and frequency ranges from table

expand metrics table with voltages

Signed-off-by: David M Nieto <david.nieto@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    | 69 +++++++++++++++++++
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c        |  3 +
 2 files changed, 72 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2d13131a432..b1cd52a9d684 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
 	uint64_t			firmware_timestamp;
 };
 
+struct gpu_metrics_v1_3 {
+	struct metrics_table_header	common_header;
+
+	/* Temperature */
+	uint16_t			temperature_edge;
+	uint16_t			temperature_hotspot;
+	uint16_t			temperature_mem;
+	uint16_t			temperature_vrgfx;
+	uint16_t			temperature_vrsoc;
+	uint16_t			temperature_vrmem;
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_umc_activity; // memory controller
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power;
+	uint64_t			energy_accumulator;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_vclk0_frequency;
+	uint16_t			average_dclk0_frequency;
+	uint16_t			average_vclk1_frequency;
+	uint16_t			average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_vclk0;
+	uint16_t			current_dclk0;
+	uint16_t			current_vclk1;
+	uint16_t			current_dclk1;
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			current_fan_speed;
+
+	/* Link width/speed */
+	uint16_t			pcie_link_width;
+	uint16_t			pcie_link_speed; // in 0.1 GT/s
+
+	uint16_t			padding;
+
+	uint32_t			gfx_activity_acc;
+	uint32_t			mem_activity_acc;
+
+	uint16_t			temperature_hbm[NUM_HBM_INSTANCES];
+
+	/* PMFW attached timestamp (10ns resolution) */
+	uint64_t			firmware_timestamp;
+
+	/* Voltage (mV) */
+	uint16_t			voltage_soc;
+	uint16_t			voltage_gfx;
+	uint16_t			voltage_mem;
+
+	uint16_t			padding1;
+};
+
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0934e5b3aa17..0ceb7329838c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(1, 2):
 		structure_size = sizeof(struct gpu_metrics_v1_2);
 		break;
+	case METRICS_VERSION(1, 3):
+		structure_size = sizeof(struct gpu_metrics_v1_3);
+		break;
 	case METRICS_VERSION(2, 0):
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
-- 
2.17.1
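
As a quick sanity check on the new revision, a minimal userspace sketch
(not part of the patch) that reads the gpu_metrics sysfs blob and prints
its header; the card0 path is an assumption and varies per system:

#include <stdio.h>
#include <stdint.h>

/* Mirrors struct metrics_table_header from kgd_pp_interface.h */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

int main(void)
{
	struct metrics_table_header hdr;
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");

	if (!f) {
		perror("gpu_metrics");
		return 1;
	}
	if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
		perror("read");
		fclose(f);
		return 1;
	}
	/* A v1.3 table reports structure_size == sizeof(gpu_metrics_v1_3) */
	printf("gpu_metrics v%u.%u, %u bytes\n",
	       hdr.format_revision, hdr.content_revision, hdr.structure_size);
	fclose(f);
	return 0;
}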


* [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x (v3)
  2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
@ 2021-05-19 17:39             ` David M Nieto
  2021-05-19 17:39             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm (v3) David M Nieto
  1 sibling, 0 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-19 17:39 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Fill voltage fields in metrics table

v2: Removed dpm and freq ranges info
v3: Added check to ensure voltage offset is not zero

Signed-off-by: David M Nieto <david.nieto@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 82 +++++++++++++++----
 1 file changed, 65 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..30f585afeddd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -2627,8 +2627,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_legacy_t metrics;
 	int ret = 0;
 
@@ -2646,7 +2646,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,17 +2681,29 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	if (metrics.CurrGfxVoltageOffset)
+		gpu_metrics->voltage_gfx =
+			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	if (metrics.CurrMemVidOffset)
+		gpu_metrics->voltage_mem =
+			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	if (metrics.CurrSocVoltageOffset)
+		gpu_metrics->voltage_soc =
+			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_t metrics;
 	int ret = 0;
 
@@ -2709,7 +2721,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,17 +2758,29 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	if (metrics.CurrGfxVoltageOffset)
+		gpu_metrics->voltage_gfx =
+			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	if (metrics.CurrMemVidOffset)
+		gpu_metrics->voltage_mem =
+			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	if (metrics.CurrSocVoltageOffset)
+		gpu_metrics->voltage_soc =
+			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 					     void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_legacy_t metrics;
 	int ret = 0;
 
@@ -2774,7 +2798,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,17 +2838,29 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	if (metrics.CurrGfxVoltageOffset)
+		gpu_metrics->voltage_gfx =
+			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	if (metrics.CurrMemVidOffset)
+		gpu_metrics->voltage_mem =
+			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	if (metrics.CurrSocVoltageOffset)
+		gpu_metrics->voltage_soc =
+			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_1 *gpu_metrics =
-		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+	struct gpu_metrics_v1_3 *gpu_metrics =
+		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 	SmuMetrics_NV12_t metrics;
 	int ret = 0;
 
@@ -2842,7 +2878,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_unlock(&smu->metrics_lock);
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +2920,21 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+	if (metrics.CurrGfxVoltageOffset)
+		gpu_metrics->voltage_gfx =
+			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+	if (metrics.CurrMemVidOffset)
+		gpu_metrics->voltage_mem =
+			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
+	if (metrics.CurrSocVoltageOffset)
+		gpu_metrics->voltage_soc =
+			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
 	*table = (void *)gpu_metrics;
 
-	return sizeof(struct gpu_metrics_v1_1);
+	return sizeof(struct gpu_metrics_v1_3);
+out:
+	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
-- 
2.17.1
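
To make the v3 zero-offset guard concrete, the conversion used above works
out as follows (the non-zero offset value is illustrative):

	voltage_mV = (155000 - 625 * offset) / 100

	offset = 0   ->  155000 / 100            = 1550 mV  (the static 1.55 V reading)
	offset = 80  -> (155000 - 50000) / 100   = 1050 mV  (a plausible live value)

so an unreported (zero) offset is skipped rather than decoded.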


* [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm (v3)
  2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
  2021-05-19 17:39             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x (v3) David M Nieto
@ 2021-05-19 17:39             ` David M Nieto
  1 sibling, 0 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-19 17:39 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Enable displaying DPM levels for VCN clocks
in swsmu-supported ASICs

v2: removed set functions for navi, renoir
v3: removed set function from arcturus

Signed-off-by: David M Nieto <david.nieto@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
---
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 +++++++++++++++++++
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  2 +
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  4 ++
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 34 ++++++++++++++
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 46 +++++++++++++++++++
 5 files changed, 132 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 77693bf0840c..1735a96dd307 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
 				now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
 	case SMU_PCIE:
 		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
 		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 30f585afeddd..e81310a424e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 	case SMU_DCEFCLK:
 		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0c40a54c46d7..b09c253b9db6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 	case SMU_DCEFCLK:
 		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index f43b4c623685..0805dc439572 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
 	CLK_MAP(UCLK, CLOCK_FCLK),
 	CLK_MAP(MCLK, CLOCK_FCLK),
+	CLK_MAP(VCLK, CLOCK_VCLK),
+	CLK_MAP(DCLK, CLOCK_DCLK),
 };
 
 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
 			return -EINVAL;
 		*freq = clk_table->FClocks[dpm_level].Freq;
 		break;
+	case SMU_VCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->VClocks[dpm_level].Freq;
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->DClocks[dpm_level].Freq;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		count = NUM_FCLK_DPM_LEVELS;
 		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
+	case SMU_VCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+		break;
+	case SMU_DCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+		break;
 	default:
 		break;
 	}
@@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
 			if (ret)
@@ -730,6 +753,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
 		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
 	}
 
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+	}
+
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7c191a5d6db9..fb744f3e17d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 								       now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
 	default:
 		break;
 	}
-- 
2.17.1


* [PATCH] drm/amdgpu/pm: display vcn pp dpm (v4)
  2021-05-19  5:43         ` Lijo Lazar
  2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
  2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
@ 2021-05-20  5:16           ` David M Nieto
  2 siblings, 0 replies; 19+ messages in thread
From: David M Nieto @ 2021-05-20  5:16 UTC (permalink / raw)
  To: amd-gfx; +Cc: David M Nieto

Enable displaying DPM levels for VCN clocks
in swsmu-supported ASICs

v2: removed set functions for navi, renoir
v3: removed set function from arcturus
v4: added missing defines to the dpm clocks table (dm_pp_smu.h) and removed
 unneeded goto labels in navi10_ppt.c

Signed-off-by: David M Nieto <david.nieto@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Change-Id: I01959a97b02aa87a6deb4a89010858cc93838cd7
---
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h    |  4 ++
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 +++++++++++++++++++
 .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   | 10 +---
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  4 ++
 .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 33 +++++++++++++
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 46 +++++++++++++++++++
 6 files changed, 135 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index fb41140e8381..4440d08743aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -245,6 +245,8 @@ struct pp_smu_funcs_nv {
 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8
 #define PP_SMU_NUM_FCLK_DPM_LEVELS    4
 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS  4
+#define PP_SMU_NUM_DCLK_DPM_LEVELS    8
+#define PP_SMU_NUM_VCLK_DPM_LEVELS    8
 
 struct dpm_clock {
   uint32_t  Freq;    // In MHz
@@ -258,6 +260,8 @@ struct dpm_clocks {
 	struct dpm_clock SocClocks[PP_SMU_NUM_SOCCLK_DPM_LEVELS];
 	struct dpm_clock FClocks[PP_SMU_NUM_FCLK_DPM_LEVELS];
 	struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
+	struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
+	struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
 };
 
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 77693bf0840c..1735a96dd307 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
 				now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+				i, single_dpm_table->dpm_levels[i].value,
+				(clocks.num_levels == 1) ? "*" :
+				(arcturus_freqs_in_same_level(
+				clocks.data[i].clocks_in_khz / 1000,
+				now) ? "*" : ""));
+		break;
+
 	case SMU_PCIE:
 		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
 		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 770b181239a3..78fe13183e8b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 	case SMU_DCEFCLK:
 		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
@@ -2694,8 +2696,6 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_3);
-out:
-	return ret;
 }
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
@@ -2771,8 +2771,6 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_3);
-out:
-	return ret;
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
@@ -2851,8 +2849,6 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_3);
-out:
-	return ret;
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
@@ -2933,8 +2929,6 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 	*table = (void *)gpu_metrics;
 
 	return sizeof(struct gpu_metrics_v1_3);
-out:
-	return ret;
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d98fb8bc084b..75acdb80c499 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_UCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
 	case SMU_DCEFCLK:
 		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index f43b4c623685..1c399c4ab4dc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
 	CLK_MAP(UCLK, CLOCK_FCLK),
 	CLK_MAP(MCLK, CLOCK_FCLK),
+	CLK_MAP(VCLK, CLOCK_VCLK),
+	CLK_MAP(DCLK, CLOCK_DCLK),
 };
 
 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
 			return -EINVAL;
 		*freq = clk_table->FClocks[dpm_level].Freq;
 		break;
+	case SMU_VCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->VClocks[dpm_level].Freq;
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= NUM_VCN_DPM_LEVELS)
+			return -EINVAL;
+		*freq = clk_table->DClocks[dpm_level].Freq;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		count = NUM_FCLK_DPM_LEVELS;
 		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
+	case SMU_VCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+		break;
+	case SMU_DCLK:
+		count = NUM_VCN_DPM_LEVELS;
+		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+		break;
 	default:
 		break;
 	}
@@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
 			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
 			if (ret)
@@ -730,6 +753,16 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
 		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
 	}
 
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7c191a5d6db9..fb744f3e17d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 								       now) ? "*" : ""));
 		break;
 
+	case SMU_VCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
+	case SMU_DCLK:
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			return ret;
+		}
+
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+		if (ret) {
+			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+			return ret;
+		}
+
+		for (i = 0; i < single_dpm_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, single_dpm_table->dpm_levels[i].value,
+					(clocks.num_levels == 1) ? "*" :
+					(aldebaran_freqs_in_same_level(
+								       clocks.data[i].clocks_in_khz / 1000,
+								       now) ? "*" : ""));
+		break;
+
 	default:
 		break;
 	}
-- 
2.17.1
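
For context on how the new levels reach users, a minimal sketch (not part
of the patch) that dumps the existing pp_dpm_vclk / pp_dpm_dclk sysfs
files, which the print_clk_levels changes above now populate; the card0
path is an assumption:

#include <stdio.h>

static void dump_levels(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "0: 400Mhz *" */
	fclose(f);
}

int main(void)
{
	dump_levels("/sys/class/drm/card0/device/pp_dpm_vclk");
	dump_levels("/sys/class/drm/card0/device/pp_dpm_dclk");
	return 0;
}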


end of thread, other threads:[~2021-05-20  5:16 UTC | newest]

Thread overview: 19+ messages
2021-05-14 21:01 [PATCH 1/2] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-14 21:01 ` [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-17  6:28   ` Lazar, Lijo
2021-05-17 20:06     ` Nieto, David M
2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-18  4:09       ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-19  5:35         ` Lijo Lazar
2021-05-18  4:09       ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
2021-05-19  5:43         ` Lijo Lazar
2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-19 15:43               ` Lijo Lazar
2021-05-19  6:02             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
2021-05-19 15:44               ` Lijo Lazar
2021-05-19 15:42             ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table Lijo Lazar
2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
2021-05-19 17:39             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x (v3) David M Nieto
2021-05-19 17:39             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm (v3) David M Nieto
2021-05-20  5:16           ` [PATCH] drm/amdgpu/pm: display vcn pp dpm (v4) David M Nieto
