* [PATCH 01/17] drm/amd/powerplay: define a universal data structure for gpu metrics (V4)
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

With a universal data structure in place, we can provide a single
interface for UMD to retrieve gpu metrics data.

V2: better naming and comments
V3: two structures created for dGPU and APU separately
V4: add driver attached timestamp

Change-Id: Ibc2d5c642eff732c082f8447348749a44dc35be3
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    | 108 ++++++++++++++++++
 1 file changed, 108 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a7f92d0b3a90..5f38ee62c103 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -324,4 +324,112 @@ struct amd_pm_funcs {
 	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
 };
 
+struct metrics_table_header {
+	uint16_t			structure_size;
+	uint8_t				format_revision;
+	uint8_t				content_revision;
+};
+
+struct gpu_metrics_v1_0 {
+	struct metrics_table_header	common_header;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Temperature */
+	uint16_t			temperature_edge;
+	uint16_t			temperature_hotspot;
+	uint16_t			temperature_mem;
+	uint16_t			temperature_vrgfx;
+	uint16_t			temperature_vrsoc;
+	uint16_t			temperature_vrmem;
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_umc_activity; // memory controller
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power;
+	uint32_t			energy_accumulator;
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_vclk0_frequency;
+	uint16_t			average_dclk0_frequency;
+	uint16_t			average_vclk1_frequency;
+	uint16_t			average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_vclk0;
+	uint16_t			current_dclk0;
+	uint16_t			current_vclk1;
+	uint16_t			current_dclk1;
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			current_fan_speed;
+
+	/* Link width/speed */
+	uint8_t				pcie_link_width;
+	uint8_t				pcie_link_speed; // in 0.1 GT/s
+};
+
+struct gpu_metrics_v2_0 {
+	struct metrics_table_header	common_header;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t			system_clock_counter;
+
+	/* Temperature */
+	uint16_t			temperature_gfx; // gfx temperature on APUs
+	uint16_t			temperature_soc; // soc temperature on APUs
+	uint16_t			temperature_core[8]; // CPU core temperature on APUs
+	uint16_t			temperature_l3[2];
+
+	/* Utilization */
+	uint16_t			average_gfx_activity;
+	uint16_t			average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t			average_socket_power; // dGPU + APU power on A + A platform
+	uint16_t			average_cpu_power;
+	uint16_t			average_soc_power;
+	uint16_t			average_gfx_power;
+	uint16_t			average_core_power[8]; // CPU core power on APUs
+
+	/* Average clocks */
+	uint16_t			average_gfxclk_frequency;
+	uint16_t			average_socclk_frequency;
+	uint16_t			average_uclk_frequency;
+	uint16_t			average_fclk_frequency;
+	uint16_t			average_vclk_frequency;
+	uint16_t			average_dclk_frequency;
+
+	/* Current clocks */
+	uint16_t			current_gfxclk;
+	uint16_t			current_socclk;
+	uint16_t			current_uclk;
+	uint16_t			current_fclk;
+	uint16_t			current_vclk;
+	uint16_t			current_dclk;
+	uint16_t			current_coreclk[8]; // CPU core clocks
+	uint16_t			current_l3clk[2];
+
+	/* Throttle status */
+	uint32_t			throttle_status;
+
+	/* Fans */
+	uint16_t			fan_pwm;
+
+	uint16_t			padding;
+};
+
 #endif
-- 
2.28.0
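
For a sense of how this header is meant to be consumed: structure_size
bounds the blob while format_revision selects between the dGPU (v1) and
APU (v2) layouts, so an older reader keeps working as the tables grow.
Below is a minimal, hypothetical userspace parser along those lines; it
is not part of the series, and parse_gpu_metrics() and its reporting
are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* Dispatch on the embedded revision rather than on blob length, so a
 * reader built against this header keeps working when newer kernels
 * append fields (structure_size grows, format_revision does not). */
static int parse_gpu_metrics(const void *blob, size_t len)
{
	struct metrics_table_header hdr;

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, blob, sizeof(hdr));
	if (hdr.structure_size > len)
		return -1;	/* truncated snapshot */

	switch (hdr.format_revision) {
	case 1:	/* dGPU layout, struct gpu_metrics_v1_0 */
	case 2:	/* APU layout, struct gpu_metrics_v2_0 */
		printf("v%u.%u metrics, %u bytes\n",
		       hdr.format_revision, hdr.content_revision,
		       (unsigned)hdr.structure_size);
		return 0;
	default:
		return -1;	/* unknown format */
	}
}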


* [PATCH 02/17] drm/amd/powerplay: add new sysfs interface for retrieving gpu metrics (V2)
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

A new interface for UMD to retrieve gpu metrics data.

V2: enrich the documentation

Change-Id: If7f3523915505c0ece0a56dfd476d2b8473440d4
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 Documentation/gpu/amdgpu.rst                  |  6 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h       |  3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c        | 57 +++++++++++++++++++
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 20 +++++++
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  3 +
 6 files changed, 90 insertions(+)

diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index 17112352f605..0f7679a7cf54 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -206,6 +206,12 @@ pp_power_profile_mode
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
    :doc: mem_busy_percent
 
+gpu_metrics
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: gpu_metrics
+
 GPU Product Information
 =======================
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index aa27fe65cdfa..b190c0af7db1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -369,6 +369,9 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_ppfeature_status(\
 			(adev)->powerplay.pp_handle, (ppfeatures)))
 
+#define amdgpu_dpm_get_gpu_metrics(adev, table) \
+		((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
+
 struct amdgpu_dpm {
 	struct amdgpu_ps        *ps;
 	/* number of valid power states */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6df405e6221d..0198acd320b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2122,6 +2122,59 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
 	return count;
 }
 
+/**
+ * DOC: gpu_metrics
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpu
+ * metrics data. The file gpu_metrics is used for this. Reading the
+ * file will dump all the current gpu metrics data.
+ *
+ * These data include temperature, frequency, engine utilization,
+ * power consumption, throttler status, fan speed and CPU core
+ * statistics (available for APUs only). In other words, reading the
+ * file gives a snapshot of all sensors at once.
+ */
+static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	void *gpu_metrics;
+	ssize_t size = 0;
+	int ret;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+
+	ret = pm_runtime_get_sync(ddev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+	down_read(&adev->reset_sem);
+	if (is_support_sw_smu(adev))
+		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
+	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
+		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
+	up_read(&adev->reset_sem);
+
+	if (size <= 0)
+		goto out;
+
+	if (size >= PAGE_SIZE)
+		size = PAGE_SIZE - 1;
+
+	memcpy(buf, gpu_metrics, size);
+
+out:
+	pm_runtime_mark_last_busy(ddev->dev);
+	pm_runtime_put_autosuspend(ddev->dev);
+
+	return size;
+}
+
 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC),
@@ -2145,6 +2198,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC),
 };
 
 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
@@ -2194,6 +2248,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(pp_features)) {
 		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
+		if (asic_type < CHIP_VEGA12)
+			*states = ATTR_STATE_UNSUPPORTED;
 	}
 
 	if (asic_type == CHIP_ARCTURUS) {
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 5f38ee62c103..0aec28fda058 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -322,6 +322,7 @@ struct amd_pm_funcs {
 	int (*asic_reset_mode_2)(void *handle);
 	int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
 	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
+	ssize_t (*get_gpu_metrics)(void *handle, void **table);
 };
 
 struct metrics_table_header {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 55463e7a11e2..cf9c5205ef08 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -2516,3 +2516,23 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 
 	return ret;
 }
+
+ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
+				void **table)
+{
+	ssize_t size;
+
+	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+		return -EOPNOTSUPP;
+
+	if (!smu->ppt_funcs->get_gpu_metrics)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&smu->mutex);
+
+	size = smu->ppt_funcs->get_gpu_metrics(smu, table);
+
+	mutex_unlock(&smu->mutex);
+
+	return size;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index b57b10406390..a08155b83289 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -589,6 +589,7 @@ struct pptable_funcs {
 	void (*log_thermal_throttling_event)(struct smu_context *smu);
 	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
 	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
 };
 
 typedef enum {
@@ -791,5 +792,7 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 
 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
 
+ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, void **table);
+
 #endif
 #endif
-- 
2.28.0
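
Usage-wise, each read of the new node returns one self-describing
binary snapshot. A hypothetical fetch from userspace is sketched below;
the card0 path is an assumption (it varies per device), and the
first-two-bytes decode assumes a little-endian host, since the blob is
exported in the kernel's native layout.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[4096];	/* all layouts are well under PAGE_SIZE */
	ssize_t n;
	int fd = open("/sys/class/drm/card0/device/gpu_metrics", O_RDONLY);

	if (fd < 0) {
		perror("gpu_metrics");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 4)
		return 1;

	/* The first two bytes are structure_size from the common header. */
	printf("got %zd bytes, structure_size=%u\n",
	       n, (unsigned)(buf[0] | (buf[1] << 8)));
	return 0;
}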


* [PATCH 03/17] drm/amd/powerplay: implement SMU V11 common APIs for retrieving link speed/width
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

These APIs will be shared across all SMU V11 ASICs.

Change-Id: Iaa4554fb0e011b9f565d89375ac7b6a7eb525420
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  8 +++
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  9 +---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.h    |  3 --
 .../drm/amd/powerplay/sienna_cichlid_ppt.c    |  8 +--
 .../drm/amd/powerplay/sienna_cichlid_ppt.h    |  3 --
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c     | 53 +++++++++++++++++++
 6 files changed, 65 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 6a42331aba8a..aeb12654257e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -264,5 +264,13 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
 				  uint32_t *min_value,
 				  uint32_t *max_value);
 
+int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 6aaf483858a0..3a3c555f0e82 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -917,7 +917,6 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	uint32_t gen_speed, lane_width;
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
-	struct amdgpu_device *adev = smu->adev;
 	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
 	OverDriveTable_t *od_table =
 		(OverDriveTable_t *)table_context->overdrive_table;
@@ -971,12 +970,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		}
 		break;
 	case SMU_PCIE:
-		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
-			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
-			>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
-			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
-			>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
 		for (i = 0; i < NUM_LINK_LEVELS; i++)
 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 2abb4ba01db1..84dc5a1b6830 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -49,9 +49,6 @@
 
 #define NAVI10_VOLTAGE_SCALE (4)
 
-#define smnPCIE_LC_SPEED_CNTL			0x11140290
-#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
-
 extern void navi10_set_ppt_funcs(struct smu_context *smu);
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index f64a1be94cb8..c15496aea50f 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -960,12 +960,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
 		}
 		break;
 	case SMU_PCIE:
-		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
-			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
-			>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
-			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
-			>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+		gen_speed = smu_v11_0_get_current_pcie_link_speed(smu);
+		lane_width = smu_v11_0_get_current_pcie_link_width(smu);
 		for (i = 0; i < NUM_LINK_LEVELS; i++)
 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h
index 8078886e4cbc..57e120c440ea 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h
@@ -31,7 +31,4 @@ typedef enum {
 
 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
 
-#define smnPCIE_LC_SPEED_CNTL                   0x11140290
-#define smnPCIE_LC_LINK_WIDTH_CNTL              0x11140288
-
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 26b4fff9e0d9..7f1229a0e8db 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -67,6 +67,19 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
 
 #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
 
+#define LINK_WIDTH_MAX				6
+#define LINK_SPEED_MAX				3
+
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
+
 int smu_v11_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -1917,3 +1930,43 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
 
 	return ret;
 }
+
+int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
+{
+	uint32_t width_level;
+
+	width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
+	if (width_level > LINK_WIDTH_MAX)
+		width_level = 0;
+
+	return link_width[width_level];
+}
+
+int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
+{
+	uint32_t speed_level;
+
+	speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
+	if (speed_level > LINK_SPEED_MAX)
+		speed_level = 0;
+
+	return link_speed[speed_level];
+}
-- 
2.28.0
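
To make the decode concrete, here is a worked example with made-up
register values: bits [6:4] of LINK_WIDTH_CNTL index link_width[]
(lane count), and bits [15:14] of SPEED_CNTL index link_speed[]
(tenths of GT/s, matching the pcie_link_speed unit comment in patch 01).

#include <stdint.h>
#include <stdio.h>

static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};	/* lanes */
static const int link_speed[] = {25, 50, 80, 160};	/* in 0.1 GT/s */

int main(void)
{
	uint32_t width_cntl = 0x00000060;	/* hypothetical readback */
	uint32_t speed_cntl = 0x0000C000;	/* hypothetical readback */
	unsigned int wl = (width_cntl & 0x70) >> 4;	/* level 6 */
	unsigned int sl = (speed_cntl & 0xC000) >> 14;	/* level 3 */

	/* Prints "x16 at 16.0 GT/s", i.e. a Gen4 x16 link. */
	printf("x%d at %d.%d GT/s\n", link_width[wl],
	       link_speed[sl] / 10, link_speed[sl] % 10);
	return 0;
}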


* [PATCH 04/17] drm/amd/powerplay: add Arcturus support for gpu metrics export
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Arcturus gpu metrics export interface.

Change-Id: I372337f31e2b7174d41fb4e3af180deb94b5ec06
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 92 +++++++++++++++++++
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  3 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c     | 14 +++
 4 files changed, 111 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index f13979687b9e..39bfe0ebfea3 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -79,6 +79,8 @@
 /* possible frequency drift (1Mhz) */
 #define EPSILON				1
 
+#define smnPCIE_ESM_CTRL			0x111003D0
+
 static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
 	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
@@ -234,6 +236,13 @@ static int arcturus_tables_init(struct smu_context *smu)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+	if (!smu_table->gpu_metrics_table) {
+		kfree(smu_table->metrics_table);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -2254,6 +2263,88 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
 	kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status);
 }
 
+static int arcturus_get_current_pcie_link_speed(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t esm_ctrl;
+
+	/* TODO: confirm this on real target */
+	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+	if ((esm_ctrl >> 15) & 0x1FFFF)
+		return (((esm_ctrl >> 8) & 0x3F) + 128);
+
+	return smu_v11_0_get_current_pcie_link_speed(smu);
+}
+
+static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
+					void **table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct gpu_metrics_v1_0 *gpu_metrics =
+		(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_SMU_METRICS,
+				   0,
+				   smu_table->metrics_table,
+				   false);
+	if (ret) {
+		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+	smu_table->metrics_time = jiffies;
+
+	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	mutex_unlock(&smu->metrics_lock);
+
+	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+
+	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+	gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
+	gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+	gpu_metrics->pcie_link_width =
+			smu_v11_0_get_current_pcie_link_width(smu);
+	gpu_metrics->pcie_link_speed =
+			arcturus_get_current_pcie_link_speed(smu);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
 	/* init dpm */
 	.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
@@ -2331,6 +2422,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+	.get_gpu_metrics = arcturus_get_gpu_metrics,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a08155b83289..ec2d2aa7f4ec 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -274,6 +274,9 @@ struct smu_table_context
 
 	void				*overdrive_table;
 	void                            *boot_overdrive_table;
+
+	uint32_t			gpu_metrics_table_size;
+	void				*gpu_metrics_table;
 };
 
 struct smu_dpm_context {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index aeb12654257e..f2a522176ca0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -272,5 +272,7 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
 
 int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
 
+void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 7f1229a0e8db..98b6085d4c8b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -417,10 +417,12 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
+	kfree(smu_table->gpu_metrics_table);
 	kfree(smu_table->boot_overdrive_table);
 	kfree(smu_table->overdrive_table);
 	kfree(smu_table->max_sustainable_clocks);
 	kfree(smu_table->driver_pptable);
+	smu_table->gpu_metrics_table = NULL;
 	smu_table->boot_overdrive_table = NULL;
 	smu_table->overdrive_table = NULL;
 	smu_table->max_sustainable_clocks = NULL;
@@ -1970,3 +1972,15 @@ int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
 
 	return link_speed[speed_level];
 }
+
+void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
+	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+	gpu_metrics->common_header.structure_size =
+				sizeof(struct gpu_metrics_v1_0);
+	gpu_metrics->common_header.format_revision = 1;
+	gpu_metrics->common_header.content_revision = 0;
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
-- 
2.28.0
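
One implicit convention in smu_v11_0_init_gpu_metrics_v1_0() above:
filling the table with 0xFF means any field a given ASIC never writes
reads back as all-ones. The patch does not spell this out, but assuming
that is the intended "not available" marker, a consumer-side check
could look like the sketch below; note, for instance, that
average_mm_activity is only filled on NAVI12 in patch 06 and would keep
the fill pattern elsewhere.

#include <stdbool.h>
#include <stdint.h>

/* Assumed convention: fields left untouched by the ASIC-specific
 * get_gpu_metrics() keep the 0xFF fill from the init helper. */
static bool metric_u16_valid(uint16_t v)
{
	return v != UINT16_MAX;
}

static bool metric_u32_valid(uint32_t v)
{
	return v != UINT32_MAX;
}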


* [PATCH 05/17] drm/amd/powerplay: update the data structure for NV12 SmuMetrics
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Although it causes no problem for now, the coming gpu metrics
interface needs to handle NV12 and the other NV1X ASICs differently
based on the asic type.

Change-Id: I88ee78c26795267588f944d4f1983e4dbf23ba85
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../powerplay/inc/smu11_driver_if_navi10.h    | 39 +++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c    | 18 +++++++--
 2 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index 4b2da98afcd2..246d3951a78a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -884,6 +884,45 @@ typedef struct {
   uint32_t     MmHubPadding[8]; // SMU internal use
 } SmuMetrics_t;
 
+typedef struct {
+  uint16_t CurrClock[PPCLK_COUNT];
+  uint16_t AverageGfxclkFrequency;
+  uint16_t AverageSocclkFrequency;
+  uint16_t AverageUclkFrequency  ;
+  uint16_t AverageGfxActivity    ;
+  uint16_t AverageUclkActivity   ;
+  uint8_t  CurrSocVoltageOffset  ;
+  uint8_t  CurrGfxVoltageOffset  ;
+  uint8_t  CurrMemVidOffset      ;
+  uint8_t  Padding8              ;
+  uint16_t AverageSocketPower    ;
+  uint16_t TemperatureEdge       ;
+  uint16_t TemperatureHotspot    ;
+  uint16_t TemperatureMem        ;
+  uint16_t TemperatureVrGfx      ;
+  uint16_t TemperatureVrMem0     ;
+  uint16_t TemperatureVrMem1     ;
+  uint16_t TemperatureVrSoc      ;
+  uint16_t TemperatureLiquid0    ;
+  uint16_t TemperatureLiquid1    ;
+  uint16_t TemperaturePlx        ;
+  uint16_t Padding16             ;
+  uint32_t ThrottlerStatus       ;
+
+  uint8_t  LinkDpmLevel;
+  uint8_t  Padding8_2;
+  uint16_t CurrFanSpeed;
+
+  uint32_t EnergyAccumulator;
+  uint16_t AverageVclkFrequency  ;
+  uint16_t AverageDclkFrequency  ;
+  uint16_t VcnActivityPercentage ;
+  uint16_t padding16_2;
+
+  // Padding - ignore
+  uint32_t     MmHubPadding[8]; // SMU internal use
+} SmuMetrics_NV12_t;
+
 typedef struct {
   uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
   uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3a3c555f0e82..a34beb27849e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -456,13 +456,18 @@ static int navi10_tables_init(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
+	struct amdgpu_device *adev = smu->adev;
 
 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
-		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	if (adev->asic_type == CHIP_NAVI12)
+		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t),
+			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	else
+		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
@@ -473,7 +478,9 @@ static int navi10_tables_init(struct smu_context *smu)
 		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
 		       AMDGPU_GEM_DOMAIN_VRAM);
 
-	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+	smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ?
+					   sizeof(SmuMetrics_NV12_t) :
+					   sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
@@ -490,6 +497,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
 				       uint32_t *value)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
+	/*
+	 * This works for NV12 as well. Although NV12 uses a SmuMetrics
+	 * structure different from the other NV1X ASICs', the two share
+	 * the same offsets for the leading members (those used here).
+	 */
 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
 
-- 
2.28.0
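
The shared-prefix claim is load-bearing, since
navi10_get_smu_metrics_data() keeps casting the NV12 table to
SmuMetrics_t. A sketch of how that assumption could be pinned down at
compile time is below; it would have to live somewhere that includes
smu11_driver_if_navi10.h, and these asserts are illustrative rather
than part of the patch.

#include <stddef.h>

/* Both structures must agree on the offsets of every member the
 * common code reads through the SmuMetrics_t cast. */
#define ASSERT_SAME_OFFSET(m) \
	_Static_assert(offsetof(SmuMetrics_t, m) == \
		       offsetof(SmuMetrics_NV12_t, m), \
		       "SmuMetrics prefix mismatch: " #m)

ASSERT_SAME_OFFSET(CurrClock);
ASSERT_SAME_OFFSET(AverageGfxActivity);
ASSERT_SAME_OFFSET(AverageSocketPower);
ASSERT_SAME_OFFSET(TemperatureEdge);
ASSERT_SAME_OFFSET(ThrottlerStatus);
ASSERT_SAME_OFFSET(CurrFanSpeed);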


* [PATCH 06/17] drm/amd/powerplay: add Navi1x support for gpu metrics export
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Navi1x gpu metrics export interface.

Change-Id: I9028fb925e70c36fb2a0b00968c462c0bbc822db
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 93 +++++++++++++++++++++-
 1 file changed, 91 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index a34beb27849e..a4ab1ace38fe 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -482,14 +482,26 @@ static int navi10_tables_init(struct smu_context *smu)
 					   sizeof(SmuMetrics_NV12_t) :
 					   sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
-		return -ENOMEM;
+		goto err0_out;
 	smu_table->metrics_time = 0;
 
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+	if (!smu_table->gpu_metrics_table)
+		goto err1_out;
+
 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
 	if (!smu_table->watermarks_table)
-		return -ENOMEM;
+		goto err2_out;
 
 	return 0;
+
+err2_out:
+	kfree(smu_table->gpu_metrics_table);
+err1_out:
+	kfree(smu_table->metrics_table);
+err0_out:
+	return -ENOMEM;
 }
 
 static int navi10_get_smu_metrics_data(struct smu_context *smu,
@@ -2501,6 +2513,82 @@ static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter
 	i2c_del_adapter(control);
 }
 
+static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
+				      void **table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct gpu_metrics_v1_0 *gpu_metrics =
+		(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+	struct amdgpu_device *adev = smu->adev;
+	SmuMetrics_NV12_t nv12_metrics = { 0 };
+	SmuMetrics_t metrics;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_SMU_METRICS,
+				   0,
+				   smu_table->metrics_table,
+				   false);
+	if (ret) {
+		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+	smu_table->metrics_time = jiffies;
+
+	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	if (adev->asic_type == CHIP_NAVI12)
+		memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
+
+	mutex_unlock(&smu->metrics_lock);
+
+	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureMem;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+	if (adev->asic_type == CHIP_NAVI12) {
+		gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator;
+		gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency;
+		gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency;
+		gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage;
+	}
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+	gpu_metrics->pcie_link_width =
+			smu_v11_0_get_current_pcie_link_width(smu);
+	gpu_metrics->pcie_link_speed =
+			smu_v11_0_get_current_pcie_link_speed(smu);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
 
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
@@ -2582,6 +2670,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.set_power_source = smu_v11_0_set_power_source,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+	.get_gpu_metrics = navi10_get_gpu_metrics,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0
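
Beyond the new export routine, the tables_init() change converts the
early returns into the usual goto-unwind ladder, so whichever
allocation fails, the error path frees exactly what was already
allocated, in reverse order. A self-contained userspace analogue of the
shape, with illustrative names and sizes:

#include <stdlib.h>

struct tables {
	void *metrics;
	void *gpu_metrics;
	void *watermarks;
};

/* Each failure label frees the allocation made just before it, then
 * falls through to the earlier labels, unwinding in reverse order. */
int tables_init(struct tables *t)
{
	t->metrics = calloc(1, 256);
	if (!t->metrics)
		goto err0;
	t->gpu_metrics = calloc(1, 128);
	if (!t->gpu_metrics)
		goto err1;
	t->watermarks = calloc(1, 64);
	if (!t->watermarks)
		goto err2;
	return 0;

err2:
	free(t->gpu_metrics);
err1:
	free(t->metrics);
err0:
	return -1;
}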


* [PATCH 07/17] drm/amd/powerplay: add Sienna Cichlid support for gpu metrics export
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Sienna Cichlid gpu metrics export interface.

Change-Id: I89e6a4415fe467e7e4aaabe07d9e8cee379caa25
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/sienna_cichlid_ppt.c    | 91 ++++++++++++++++++-
 1 file changed, 89 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index c15496aea50f..345b8571f716 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -385,14 +385,26 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
 
 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
-		return -ENOMEM;
+		goto err0_out;
 	smu_table->metrics_time = 0;
 
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+	if (!smu_table->gpu_metrics_table)
+		goto err1_out;
+
 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
 	if (!smu_table->watermarks_table)
-		return -ENOMEM;
+		goto err2_out;
 
 	return 0;
+
+err2_out:
+	kfree(smu_table->gpu_metrics_table);
+err1_out:
+	kfree(smu_table->metrics_table);
+err0_out:
+	return -ENOMEM;
 }
 
 static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
@@ -2663,6 +2675,80 @@ static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_
 	i2c_del_adapter(control);
 }
 
+static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+					      void **table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct gpu_metrics_v1_0 *gpu_metrics =
+		(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_SMU_METRICS,
+				   0,
+				   smu_table->metrics_table,
+				   false);
+	if (ret) {
+		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+	smu_table->metrics_time = jiffies;
+
+	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	mutex_unlock(&smu->metrics_lock);
+
+	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureMem;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+
+	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+
+	if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
+	else
+		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
+	gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency;
+	gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency;
+	gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency;
+	gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency;
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0];
+	gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1];
+	gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+	gpu_metrics->pcie_link_width =
+			smu_v11_0_get_current_pcie_link_width(smu);
+	gpu_metrics->pcie_link_speed =
+			smu_v11_0_get_current_pcie_link_speed(smu);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
 
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
@@ -2740,6 +2826,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+	.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
-- 
2.28.0
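
The main wrinkle relative to the Navi1x version is the average gfxclk
selection: at or below the busy threshold the post-deep-sleep (PostDs)
average is reported, above it the pre-deep-sleep (PreDs) one, the idea
presumably being that deep-sleep residency dominates the effective
clock when the engine is mostly idle. Distilled into a sketch, treating
the ppt header's SMU_11_0_7_GFX_BUSY_THRESHOLD as an opaque parameter:

#include <stdint.h>

/* Pick which averaged gfxclk to expose based on how busy the gfx
 * engine was over the sampling window. */
static uint16_t reported_gfxclk(uint16_t activity_pct,
				uint16_t pre_ds, uint16_t post_ds,
				uint16_t busy_threshold)
{
	return (activity_pct <= busy_threshold) ? post_ds : pre_ds;
}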


* [PATCH 08/17] drm/amd/powerplay: add Renoir support for gpu metrics export (V2)
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Renoir gpu metrics export interface.

V2: use memcpy to make code more compact

Change-Id: Ic83265536eeaa9e458dc395b2be18ea49da4c68a
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 +
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c    | 80 ++++++++++++++++++-
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c     | 12 +++
 3 files changed, 91 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 02de3b6199e5..fa2e8cb07967 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 
 int smu_v12_0_set_driver_table_location(struct smu_context *smu);
 
+void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
+
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 575ae4be98a2..61e8700a7bdb 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -166,18 +166,32 @@ static int renoir_init_smc_tables(struct smu_context *smu)
 
 	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
 	if (!smu_table->clocks_table)
-		return -ENOMEM;
+		goto err0_out;
 
 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
-		return -ENOMEM;
+		goto err1_out;
 	smu_table->metrics_time = 0;
 
 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
 	if (!smu_table->watermarks_table)
-		return -ENOMEM;
+		goto err2_out;
+
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0);
+	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+	if (!smu_table->gpu_metrics_table)
+		goto err3_out;
 
 	return 0;
+
+err3_out:
+	kfree(smu_table->watermarks_table);
+err2_out:
+	kfree(smu_table->metrics_table);
+err1_out:
+	kfree(smu_table->clocks_table);
+err0_out:
+	return -ENOMEM;
 }
 
 /**
@@ -995,6 +1009,65 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
 
 }
 
+static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
+				      void **table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct gpu_metrics_v2_0 *gpu_metrics =
+		(struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	int ret = 0;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
+
+	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+	gpu_metrics->temperature_soc = metrics.SocTemperature;
+	memcpy(&gpu_metrics->temperature_core[0],
+		&metrics.CoreTemperature[0],
+		sizeof(uint16_t) * 8);
+	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
+	gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_mm_activity = metrics.AverageUvdActivity;
+
+	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+	gpu_metrics->average_cpu_power = metrics.Power[0];
+	gpu_metrics->average_soc_power = metrics.Power[1];
+	memcpy(&gpu_metrics->average_core_power[0],
+		&metrics.CorePower[0],
+		sizeof(uint16_t) * 8);
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
+	gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
+
+	gpu_metrics->current_gfxclk = metrics.ClockFrequency[CLOCK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.ClockFrequency[CLOCK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.ClockFrequency[CLOCK_UMCCLK];
+	gpu_metrics->current_fclk = metrics.ClockFrequency[CLOCK_FCLK];
+	gpu_metrics->current_vclk = metrics.ClockFrequency[CLOCK_VCLK];
+	gpu_metrics->current_dclk = metrics.ClockFrequency[CLOCK_DCLK];
+	memcpy(&gpu_metrics->current_coreclk[0],
+		&metrics.CoreFrequency[0],
+		sizeof(uint16_t) * 8);
+	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
+	gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	gpu_metrics->fan_pwm = metrics.FanPwm;
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v2_0);
+}
+
 static const struct pptable_funcs renoir_ppt_funcs = {
 	.set_power_state = NULL,
 	.print_clk_levels = renoir_print_clk_levels,
@@ -1029,6 +1102,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.is_dpm_running = renoir_is_dpm_running,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+	.get_gpu_metrics = renoir_get_gpu_metrics,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 31456437bb18..660f403d5770 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -274,3 +274,15 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
 
 	return ret;
 }
+
+void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
+{
+	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
+
+	gpu_metrics->common_header.structure_size =
+				sizeof(struct gpu_metrics_v2_0);
+	gpu_metrics->common_header.format_revision = 2;
+	gpu_metrics->common_header.content_revision = 0;
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
-- 
2.28.0


* [PATCH 09/17] drm/amd/powerplay: enable gpu_metrics export on legacy powerplay routines
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Enable gpu_metrics support on legacy powerplay routines.

Change-Id: Ic2f09babe7e6bead9a838b7ce3c94bf8d4110991
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 19 +++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h     |  1 +
 2 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7e6dcdf7df73..a6321f2063c1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1598,6 +1598,24 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 	return 0;
 }
 
+static ssize_t pp_get_gpu_metrics(void *handle, void **table)
+{
+	struct pp_hwmgr *hwmgr = handle;
+	ssize_t size;
+
+	if (!hwmgr)
+		return -EINVAL;
+
+	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&hwmgr->smu_lock);
+	size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return size;
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1658,4 +1676,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
 	.set_df_cstate = pp_set_df_cstate,
 	.set_xgmi_pstate = pp_set_xgmi_pstate,
+	.get_gpu_metrics = pp_get_gpu_metrics,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 15ed6cbdf366..1b3529efc91e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -359,6 +359,7 @@ struct pp_hwmgr_func {
 	int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
 	int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
 					bool disable);
+	ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
 };
 
 struct pp_table_func {
-- 
2.28.0


* [PATCH 10/17] drm/amd/powerplay: add Vega20 support for gpu metrics export
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Vega20 gpu metrics export interface.

Change-Id: I7b4ab850358cc6d7455889d9031a7111cba35ebd
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c    | 123 +++++++++++++++++-
 .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h    |   1 +
 2 files changed, 117 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index bacbe2fa1f9a..037bebda2eae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -55,6 +55,11 @@
 #define smnPCIE_LC_SPEED_CNTL			0x11140290
 #define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
 
+#define LINK_WIDTH_MAX				6
+#define LINK_SPEED_MAX				3
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
+
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data =
@@ -3265,6 +3270,46 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 	return 0;
 }
 
+static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+{
+	uint32_t width_level;
+
+	width_level = vega20_get_current_pcie_link_width_level(hwmgr);
+	if (width_level > LINK_WIDTH_MAX)
+		width_level = 0;
+
+	return link_width[width_level];
+}
+
+static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speed_level;
+
+	speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
+	if (speed_level > LINK_SPEED_MAX)
+		speed_level = 0;
+
+	return link_speed[speed_level];
+}
+
 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {
@@ -3277,7 +3322,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v3_information *pptable_information =
 		(struct phm_ppt_v3_information *)hwmgr->pptable;
 	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
-	struct amdgpu_device *adev = hwmgr->adev;
 	struct pp_clock_levels_with_latency clocks;
 	struct vega20_single_dpm_table *fclk_dpm_table =
 			&(data->dpm_table.fclk_table);
@@ -3371,12 +3415,10 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 
 	case PP_PCIE:
-		current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
-			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
-			    >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-		current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
-			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
-			    >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+		current_gen_speed =
+			vega20_get_current_pcie_link_speed_level(hwmgr);
+		current_lane_width =
+			vega20_get_current_pcie_link_width_level(hwmgr);
 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
 			if (i == 1 && data->pcie_parameters_override) {
 				gen_speed = data->pcie_gen_level1;
@@ -4218,6 +4260,72 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
 	return ret;
 }
 
+static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
+	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+	gpu_metrics->common_header.structure_size =
+				sizeof(struct gpu_metrics_v1_0);
+	gpu_metrics->common_header.format_revision = 1;
+	gpu_metrics->common_header.content_revision = 0;
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
+				      void **table)
+{
+	struct vega20_hwmgr *data =
+			(struct vega20_hwmgr *)(hwmgr->backend);
+	struct gpu_metrics_v1_0 *gpu_metrics =
+			&data->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	uint32_t fan_speed_rpm;
+	int ret;
+
+	ret = vega20_get_metrics_table(hwmgr, &metrics);
+	if (ret)
+		return ret;
+
+	vega20_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
+	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
+
+	gpu_metrics->pcie_link_width =
+			vega20_get_current_pcie_link_width(hwmgr);
+	gpu_metrics->pcie_link_speed =
+			vega20_get_current_pcie_link_speed(hwmgr);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
+
 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	/* init/fini related */
 	.backend_init = vega20_hwmgr_backend_init,
@@ -4288,6 +4396,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
 	.set_df_cstate = vega20_set_df_cstate,
 	.set_xgmi_pstate = vega20_set_xgmi_pstate,
+	.get_gpu_metrics = vega20_get_gpu_metrics,
 };
 
 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 2c3125f82b24..075c0094da9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -527,6 +527,7 @@ struct vega20_hwmgr {
 
 	unsigned long                  metrics_time;
 	SmuMetrics_t                   metrics_table;
+	struct gpu_metrics_v1_0        gpu_metrics_table;
 
 	bool                           pcie_parameters_override;
 	uint32_t                       pcie_gen_level1;
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread
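
A worked example of the level decoding introduced in the patch above: the raw
register fields select an index into the lookup tables, and gpu_metrics_v1_0
then carries the link width in lanes and the link speed in 0.1 GT/s units.
This is a standalone sketch; the tables mirror the ones defined in the patch.

static const int link_width_tbl[] = {0, 1, 2, 4, 8, 12, 16};	/* lanes */
static const int link_speed_tbl[] = {25, 50, 80, 160};		/* 0.1 GT/s */

static void pcie_decode_example(void)
{
	unsigned int width_level = 6;	/* e.g. LC_LINK_WIDTH_RD field value */
	unsigned int speed_level = 3;	/* e.g. LC_CURRENT_DATA_RATE field value */

	/* out-of-range levels fall back to index 0, as in the patch */
	int width = link_width_tbl[width_level > 6 ? 0 : width_level];
	int speed = link_speed_tbl[speed_level > 3 ? 0 : speed_level];

	/* width == 16 (x16 link), speed == 160 (16.0 GT/s) */
	(void)width;
	(void)speed;
}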

* [PATCH 11/17] drm/amd/powerplay: add Vega12 support for gpu metrics export
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (8 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 10/17] drm/amd/powerplay: add Vega20 support for gpu metrics export Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-08-04 20:40   ` Alex Deucher
  2020-07-31  2:43 ` [PATCH 12/17] drm/amd/powerplay: add control method to bypass metrics cache on Arcturus Evan Quan
                   ` (5 subsequent siblings)
  15 siblings, 1 reply; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

Add Vega12 gpu metrics export interface.

Change-Id: I2c910f523049f0f90eecb8d74cb73ebb39a22bd9
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 111 ++++++++++++++++++
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.h    |   1 +
 2 files changed, 112 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index a678a67f1c0d..40bb0c2e4e8c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -47,6 +47,13 @@
 #include "pp_thermal.h"
 #include "vega12_baco.h"
 
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
+
+#define LINK_WIDTH_MAX				6
+#define LINK_SPEED_MAX				3
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask);
@@ -2095,6 +2102,46 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 	return 0;
 }
 
+static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+{
+	uint32_t width_level;
+
+	width_level = vega12_get_current_pcie_link_width_level(hwmgr);
+	if (width_level > LINK_WIDTH_MAX)
+		width_level = 0;
+
+	return link_width[width_level];
+}
+
+static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speed_level;
+
+	speed_level = vega12_get_current_pcie_link_speed_level(hwmgr);
+	if (speed_level > LINK_SPEED_MAX)
+		speed_level = 0;
+
+	return link_speed[speed_level];
+}
+
 static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {
@@ -2682,6 +2729,69 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
+	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+	gpu_metrics->common_header.structure_size =
+				sizeof(struct gpu_metrics_v1_0);
+	gpu_metrics->common_header.format_revision = 1;
+	gpu_metrics->common_header.content_revision = 0;
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
+				      void **table)
+{
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+	struct gpu_metrics_v1_0 *gpu_metrics =
+			&data->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	uint32_t fan_speed_rpm;
+	int ret;
+
+	ret = vega12_get_metrics_table(hwmgr, &metrics);
+	if (ret)
+		return ret;
+
+	vega12_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
+	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
+
+	gpu_metrics->pcie_link_width =
+			vega12_get_current_pcie_link_width(hwmgr);
+	gpu_metrics->pcie_link_speed =
+			vega12_get_current_pcie_link_speed(hwmgr);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
+
 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.backend_init = vega12_hwmgr_backend_init,
 	.backend_fini = vega12_hwmgr_backend_fini,
@@ -2739,6 +2849,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.get_ppfeature_status = vega12_get_ppfeature_status,
 	.set_ppfeature_status = vega12_set_ppfeature_status,
 	.set_mp1_state = vega12_set_mp1_state,
+	.get_gpu_metrics = vega12_get_gpu_metrics,
 };
 
 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index 73875399666a..aa63ae41942d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -399,6 +399,7 @@ struct vega12_hwmgr {
 
 	unsigned long                  metrics_time;
 	SmuMetrics_t                   metrics_table;
+	struct gpu_metrics_v1_0        gpu_metrics_table;
 };
 
 #define VEGA12_DPM2_NEAR_TDP_DEC                      10
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 12/17] drm/amd/powerplay: add control method to bypass metrics cache on Arcturus
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (9 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 11/17] drm/amd/powerplay: add Vega12 " Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-07-31  2:43 ` [PATCH 13/17] drm/amd/powerplay: add control method to bypass metrics cache on Navi10 Evan Quan
                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: Ie6e9377f5984c3c09737b323c52249f9189bcaf5
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 74 +++++++++++++-------
 1 file changed, 49 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 39bfe0ebfea3..d678534ddc69 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -541,18 +541,16 @@ static int arcturus_freqs_in_same_level(int32_t frequency1,
 	return (abs(frequency1 - frequency2) <= EPSILON);
 }
 
-static int arcturus_get_smu_metrics_data(struct smu_context *smu,
-					 MetricsMember_t member,
-					 uint32_t *value)
+static int arcturus_get_metrics_table_locked(struct smu_context *smu,
+					     SmuMetrics_t *metrics_table,
+					     bool bypass_cache)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
-	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
 
-	mutex_lock(&smu->metrics_lock);
-
-	if (!smu_table->metrics_time ||
-	     time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
+	if (bypass_cache ||
+	    !smu_table->metrics_time ||
+	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
 		ret = smu_cmn_update_table(smu,
 				       SMU_TABLE_SMU_METRICS,
 				       0,
@@ -560,12 +558,50 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
 				       false);
 		if (ret) {
 			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}
 
+	if (metrics_table)
+		memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	return 0;
+}
+
+static int arcturus_get_metrics_table(struct smu_context *smu,
+				      SmuMetrics_t *metrics_table,
+				      bool bypass_cache)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+	ret = arcturus_get_metrics_table_locked(smu,
+						metrics_table,
+						bypass_cache);
+	mutex_unlock(&smu->metrics_lock);
+
+	return ret;
+}
+
+static int arcturus_get_smu_metrics_data(struct smu_context *smu,
+					 MetricsMember_t member,
+					 uint32_t *value)
+{
+	struct smu_table_context *smu_table= &smu->smu_table;
+	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = arcturus_get_metrics_table_locked(smu,
+						NULL,
+						false);
+	if (ret) {
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+
 	switch (member) {
 	case METRICS_CURR_GFXCLK:
 		*value = metrics->CurrClock[PPCLK_GFXCLK];
@@ -2285,23 +2321,11 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
 	SmuMetrics_t metrics;
 	int ret = 0;
 
-	mutex_lock(&smu->metrics_lock);
-
-	ret = smu_cmn_update_table(smu,
-				   SMU_TABLE_SMU_METRICS,
-				   0,
-				   smu_table->metrics_table,
-				   false);
-	if (ret) {
-		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-		mutex_unlock(&smu->metrics_lock);
+	ret = arcturus_get_metrics_table(smu,
+					 &metrics,
+					 true);
+	if (ret)
 		return ret;
-	}
-	smu_table->metrics_time = jiffies;
-
-	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
-
-	mutex_unlock(&smu->metrics_lock);
 
 	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
 
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread
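
In short, the split above gives two calling conventions. A minimal sketch
(the wrapper name is hypothetical; the helpers are the ones introduced in
the patch):

static int arcturus_metrics_usage_sketch(struct smu_context *smu)
{
	SmuMetrics_t metrics;
	int ret;

	/* sensor-style read: may be served from a cache up to ~1 ms old */
	ret = arcturus_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	/* gpu_metrics export read: bypass the cache, fetch fresh from PMFW */
	return arcturus_get_metrics_table(smu, &metrics, true);
}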

* [PATCH 13/17] drm/amd/powerplay: add control method to bypass metrics cache on Navi10
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (10 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 12/17] drm/amd/powerplay: add control method to bypass metrics cache on Arcturus Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-07-31  2:43 ` [PATCH 14/17] drm/amd/powerplay: add control method to bypass metrics cache on Sienna Cichlid Evan Quan
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: I281b4de9262b98f0c52131feb39ba9e101b548b7
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 60 ++++++++++++++--------
 1 file changed, 38 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index a4ab1ace38fe..ee8d938ea3bd 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -504,22 +504,16 @@ static int navi10_tables_init(struct smu_context *smu)
 	return -ENOMEM;
 }
 
-static int navi10_get_smu_metrics_data(struct smu_context *smu,
-				       MetricsMember_t member,
-				       uint32_t *value)
+static int navi10_get_metrics_table_locked(struct smu_context *smu,
+					   SmuMetrics_t *metrics_table,
+					   bool bypass_cache)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
-	/*
-	 * This works for NV12 also. As although NV12 uses a different
-	 * SmuMetrics structure from other NV1X ASICs, they share the
-	 * same offsets for the heading parts(those members used here).
-	 */
-	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
 
-	mutex_lock(&smu->metrics_lock);
-	if (!smu_table->metrics_time ||
-	     time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
+	if (bypass_cache ||
+	    !smu_table->metrics_time ||
+	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
 		ret = smu_cmn_update_table(smu,
 				       SMU_TABLE_SMU_METRICS,
 				       0,
@@ -527,12 +521,40 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
 				       false);
 		if (ret) {
 			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}
 
+	if (metrics_table)
+		memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	return 0;
+}
+
+static int navi10_get_smu_metrics_data(struct smu_context *smu,
+				       MetricsMember_t member,
+				       uint32_t *value)
+{
+	struct smu_table_context *smu_table= &smu->smu_table;
+	/*
+	 * This works for NV12 also. As although NV12 uses a different
+	 * SmuMetrics structure from other NV1X ASICs, they share the
+	 * same offsets for the heading parts(those members used here).
+	 */
+	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = navi10_get_metrics_table_locked(smu,
+					      NULL,
+					      false);
+	if (ret) {
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+
 	switch (member) {
 	case METRICS_CURR_GFXCLK:
 		*value = metrics->CurrClock[PPCLK_GFXCLK];
@@ -2526,19 +2548,13 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
 	mutex_lock(&smu->metrics_lock);
 
-	ret = smu_cmn_update_table(smu,
-				   SMU_TABLE_SMU_METRICS,
-				   0,
-				   smu_table->metrics_table,
-				   false);
+	ret = navi10_get_metrics_table_locked(smu,
+					      &metrics,
+					      true);
 	if (ret) {
-		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
 		mutex_unlock(&smu->metrics_lock);
 		return ret;
 	}
-	smu_table->metrics_time = jiffies;
-
-	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
 
 	if (adev->asic_type == CHIP_NAVI12)
 		memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 14/17] drm/amd/powerplay: add control method to bypass metrics cache on Sienna Cichlid
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (11 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 13/17] drm/amd/powerplay: add control method to bypass metrics cache on Navi10 Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-07-31  2:43 ` [PATCH 15/17] drm/amd/powerplay: add control method to bypass metrics cache on Renoir Evan Quan
                   ` (2 subsequent siblings)
  15 siblings, 0 replies; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: Ic9d5f10b470584c82d4ca9035ab27fed44f0ac20
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../drm/amd/powerplay/sienna_cichlid_ppt.c    | 73 +++++++++++++------
 1 file changed, 49 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 345b8571f716..a95c82a709d8 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -407,17 +407,16 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
 	return -ENOMEM;
 }
 
-static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
-					       MetricsMember_t member,
-					       uint32_t *value)
+static int sienna_cichlid_get_metrics_table_locked(struct smu_context *smu,
+						   SmuMetrics_t *metrics_table,
+						   bool bypass_cache)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
-	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
 
-	mutex_lock(&smu->metrics_lock);
-	if (!smu_table->metrics_time ||
-	     time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
+	if (bypass_cache ||
+	    !smu_table->metrics_time ||
+	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
 		ret = smu_cmn_update_table(smu,
 				       SMU_TABLE_SMU_METRICS,
 				       0,
@@ -425,12 +424,50 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
 				       false);
 		if (ret) {
 			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}
 
+	if (metrics_table)
+		memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	return 0;
+}
+
+static int sienna_cichlid_get_metrics_table(struct smu_context *smu,
+					    SmuMetrics_t *metrics_table,
+					    bool bypass_cache)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+	ret = sienna_cichlid_get_metrics_table_locked(smu,
+						      metrics_table,
+						      bypass_cache);
+	mutex_unlock(&smu->metrics_lock);
+
+	return ret;
+}
+
+static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
+					       MetricsMember_t member,
+					       uint32_t *value)
+{
+	struct smu_table_context *smu_table= &smu->smu_table;
+	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+	int ret = 0;
+
+	mutex_lock(&smu->metrics_lock);
+
+	ret = sienna_cichlid_get_metrics_table_locked(smu,
+						      NULL,
+						      false);
+	if (ret) {
+		mutex_unlock(&smu->metrics_lock);
+		return ret;
+	}
+
 	switch (member) {
 	case METRICS_CURR_GFXCLK:
 		*value = metrics->CurrClock[PPCLK_GFXCLK];
@@ -2684,23 +2721,11 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	SmuMetrics_t metrics;
 	int ret = 0;
 
-	mutex_lock(&smu->metrics_lock);
-
-	ret = smu_cmn_update_table(smu,
-				   SMU_TABLE_SMU_METRICS,
-				   0,
-				   smu_table->metrics_table,
-				   false);
-	if (ret) {
-		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-		mutex_unlock(&smu->metrics_lock);
+	ret = sienna_cichlid_get_metrics_table(smu,
+					       &metrics,
+					       true);
+	if (ret)
 		return ret;
-	}
-	smu_table->metrics_time = jiffies;
-
-	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
-
-	mutex_unlock(&smu->metrics_lock);
 
 	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
 
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 15/17] drm/amd/powerplay: add control method to bypass metrics cache on Renoir
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (12 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 14/17] drm/amd/powerplay: add control method to bypass metrics cache on Sienna Cichlid Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-07-31  2:43 ` [PATCH 16/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega20 Evan Quan
  2020-07-31  2:43 ` [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12 Evan Quan
  15 siblings, 0 replies; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: I780aba0be35a35bd9c9727118b33625e7cc9bf1f
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 61e8700a7bdb..4c1a506c3c17 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -129,13 +129,17 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
 };
 
 static int renoir_get_metrics_table(struct smu_context *smu,
-				    SmuMetrics_t *metrics_table)
+				    SmuMetrics_t *metrics_table,
+				    bool bypass_cache)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
 	int ret = 0;
 
 	mutex_lock(&smu->metrics_lock);
-	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+
+	if (bypass_cache ||
+	    !smu_table->metrics_time ||
+	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
 		ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 				(void *)smu_table->metrics_table, false);
 		if (ret) {
@@ -146,7 +150,9 @@ static int renoir_get_metrics_table(struct smu_context *smu,
 		smu_table->metrics_time = jiffies;
 	}
 
-	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+	if (metrics_table)
+		memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
 	mutex_unlock(&smu->metrics_lock);
 
 	return ret;
@@ -375,7 +381,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 
 	memset(&metrics, 0, sizeof(metrics));
 
-	ret = renoir_get_metrics_table(smu, &metrics);
+	ret = renoir_get_metrics_table(smu, &metrics, false);
 	if (ret)
 		return ret;
 
@@ -529,7 +535,7 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
 	int ret = 0, clk_id = 0;
 	SmuMetrics_t metrics;
 
-	ret = renoir_get_metrics_table(smu, &metrics);
+	ret = renoir_get_metrics_table(smu, &metrics, false);
 	if (ret)
 		return ret;
 
@@ -612,7 +618,7 @@ static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
 	if (!value)
 		return -EINVAL;
 
-	ret = renoir_get_metrics_table(smu, &metrics);
+	ret = renoir_get_metrics_table(smu, &metrics, false);
 	if (ret)
 		return ret;
 
@@ -632,7 +638,7 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
 	if (!value)
 		return -EINVAL;
 
-	ret = renoir_get_metrics_table(smu, &metrics);
+	ret = renoir_get_metrics_table(smu, &metrics, false);
 	if (ret)
 		return ret;
 
@@ -1018,7 +1024,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 	SmuMetrics_t metrics;
 	int ret = 0;
 
-	ret = renoir_get_metrics_table(smu, &metrics);
+	ret = renoir_get_metrics_table(smu, &metrics, true);
 	if (ret)
 		return ret;
 
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 16/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega20
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (13 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 15/17] drm/amd/powerplay: add control method to bypass metrics cache on Renoir Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-07-31  2:43 ` [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12 Evan Quan
  15 siblings, 0 replies; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: I8836f7f096dceb08a90dd3c899d2e9ccea1ef1f3
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c    | 31 ++++++++++++-------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 037bebda2eae..86d3a10379be 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2090,22 +2090,29 @@ static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 	return (mem_clk * 100);
 }
 
-static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
+				    SmuMetrics_t *metrics_table,
+				    bool bypass_cache)
 {
 	struct vega20_hwmgr *data =
 			(struct vega20_hwmgr *)(hwmgr->backend);
 	int ret = 0;
 
-	if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
-		ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
-				TABLE_SMU_METRICS, true);
+	if (bypass_cache ||
+	    !data->metrics_time ||
+	    time_after(jiffies, data->metrics_time + HZ / 2)) {
+		ret = smum_smc_table_manager(hwmgr,
+					     (uint8_t *)(&data->metrics_table),
+					     TABLE_SMU_METRICS,
+					     true);
 		if (ret) {
 			pr_info("Failed to export SMU metrics table!\n");
 			return ret;
 		}
-		memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
 		data->metrics_time = jiffies;
-	} else
+	}
+
+	if (metrics_table)
 		memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
 
 	return ret;
@@ -2117,7 +2124,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 	int ret = 0;
 	SmuMetrics_t metrics_table;
 
-	ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+	ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
 	if (ret)
 		return ret;
 
@@ -2155,7 +2162,7 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
 	int ret = 0;
 	SmuMetrics_t metrics_table;
 
-	ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+	ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
 	if (ret)
 		return ret;
 
@@ -2185,7 +2192,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
 	switch (idx) {
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
 		if (ret)
 			return ret;
 
@@ -2210,7 +2217,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
-		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
 		if (ret)
 			return ret;
 
@@ -2219,7 +2226,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_MEM_TEMP:
-		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
 		if (ret)
 			return ret;
 
@@ -4283,7 +4290,7 @@ static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
 	uint32_t fan_speed_rpm;
 	int ret;
 
-	ret = vega20_get_metrics_table(hwmgr, &metrics);
+	ret = vega20_get_metrics_table(hwmgr, &metrics, true);
 	if (ret)
 		return ret;
 
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12
  2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
                   ` (14 preceding siblings ...)
  2020-07-31  2:43 ` [PATCH 16/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega20 Evan Quan
@ 2020-07-31  2:43 ` Evan Quan
  2020-08-04 20:41   ` Alex Deucher
  15 siblings, 1 reply; 21+ messages in thread
From: Evan Quan @ 2020-07-31  2:43 UTC (permalink / raw)
  To: amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Evan Quan,
	Harish.Kasiviswanathan, nirmodas

For the gpu metrics export, the metrics cache makes no sense. It's up to the
user to decide how often the metrics should be retrieved.

Change-Id: Ic2a27ebc90f0a7cf581d0697c121b6d7df030f3b
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 29 ++++++++++++-------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 40bb0c2e4e8c..c70c30175801 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1262,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 	return (mem_clk * 100);
 }
 
-static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
+				    SmuMetrics_t *metrics_table,
+				    bool bypass_cache)
 {
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
 	int ret = 0;
 
-	if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
-		ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
-				TABLE_SMU_METRICS, true);
+	if (bypass_cache ||
+	    !data->metrics_time ||
+	    time_after(jiffies, data->metrics_time + HZ / 2)) {
+		ret = smum_smc_table_manager(hwmgr,
+					     (uint8_t *)(&data->metrics_table),
+					     TABLE_SMU_METRICS,
+					     true);
 		if (ret) {
 			pr_info("Failed to export SMU metrics table!\n");
 			return ret;
 		}
-		memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
 		data->metrics_time = jiffies;
-	} else
+	}
+
+	if (metrics_table)
 		memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
 
 	return ret;
@@ -1288,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
 	SmuMetrics_t metrics_table;
 	int ret = 0;
 
-	ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+	ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
 	if (ret)
 		return ret;
 
@@ -1339,7 +1346,7 @@ static int vega12_get_current_activity_percent(
 	SmuMetrics_t metrics_table;
 	int ret = 0;
 
-	ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+	ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
 	if (ret)
 		return ret;
 
@@ -1387,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-		ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+		ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
 		if (ret)
 			return ret;
 
@@ -1396,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_MEM_TEMP:
-		ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+		ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
 		if (ret)
 			return ret;
 
@@ -2752,7 +2759,7 @@ static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
 	uint32_t fan_speed_rpm;
 	int ret;
 
-	ret = vega12_get_metrics_table(hwmgr, &metrics);
+	ret = vega12_get_metrics_table(hwmgr, &metrics, true);
 	if (ret)
 		return ret;
 
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 08/17] drm/amd/powerplay: add Renoir support for gpu metrics export(V2)
  2020-07-31  2:43 ` [PATCH 08/17] drm/amd/powerplay: add Renoir support for gpu metrics export(V2) Evan Quan
@ 2020-07-31 14:41   ` Nirmoy
  0 siblings, 0 replies; 21+ messages in thread
From: Nirmoy @ 2020-07-31 14:41 UTC (permalink / raw)
  To: Evan Quan, amd-gfx
  Cc: alexander.deucher, Felix.Kuehling, Harish.Kasiviswanathan

Acked-by: Nirmoy Das <nirmoy.das@amd.com>

On 7/31/20 4:43 AM, Evan Quan wrote:
> Add Renoir gpu metrics export interface.
>
> V2: use memcpy to make code more compact
>
> Change-Id: Ic83265536eeaa9e458dc395b2be18ea49da4c68a
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
> ---
>   drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 +
>   drivers/gpu/drm/amd/powerplay/renoir_ppt.c    | 80 ++++++++++++++++++-
>   drivers/gpu/drm/amd/powerplay/smu_v12_0.c     | 12 +++
>   3 files changed, 91 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> index 02de3b6199e5..fa2e8cb07967 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> @@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
>   
>   int smu_v12_0_set_driver_table_location(struct smu_context *smu);
>   
> +void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
> +
>   #endif
>   #endif
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index 575ae4be98a2..61e8700a7bdb 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -166,18 +166,32 @@ static int renoir_init_smc_tables(struct smu_context *smu)
>   
>   	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
>   	if (!smu_table->clocks_table)
> -		return -ENOMEM;
> +		goto err0_out;
>   
>   	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
>   	if (!smu_table->metrics_table)
> -		return -ENOMEM;
> +		goto err1_out;
>   	smu_table->metrics_time = 0;
>   
>   	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
>   	if (!smu_table->watermarks_table)
> -		return -ENOMEM;
> +		goto err2_out;
> +
> +	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0);
> +	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
> +	if (!smu_table->gpu_metrics_table)
> +		goto err3_out;
>   
>   	return 0;
> +
> +err3_out:
> +	kfree(smu_table->watermarks_table);
> +err2_out:
> +	kfree(smu_table->metrics_table);
> +err1_out:
> +	kfree(smu_table->clocks_table);
> +err0_out:
> +	return -ENOMEM;
>   }
>   
>   /**
> @@ -995,6 +1009,65 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
>   
>   }
>   
> +static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
> +				      void **table)
> +{
> +	struct smu_table_context *smu_table = &smu->smu_table;
> +	struct gpu_metrics_v2_0 *gpu_metrics =
> +		(struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table;
> +	SmuMetrics_t metrics;
> +	int ret = 0;
> +
> +	ret = renoir_get_metrics_table(smu, &metrics);
> +	if (ret)
> +		return ret;
> +
> +	smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
> +
> +	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
> +	gpu_metrics->temperature_soc = metrics.SocTemperature;
> +	memcpy(&gpu_metrics->temperature_core[0],
> +		&metrics.CoreTemperature[0],
> +		sizeof(uint16_t) * 8);
> +	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
> +	gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
> +
> +	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
> +	gpu_metrics->average_mm_activity = metrics.AverageUvdActivity;
> +
> +	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
> +	gpu_metrics->average_cpu_power = metrics.Power[0];
> +	gpu_metrics->average_soc_power = metrics.Power[1];
> +	memcpy(&gpu_metrics->average_core_power[0],
> +		&metrics.CorePower[0],
> +		sizeof(uint16_t) * 8);
> +
> +	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
> +	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
> +	gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
> +	gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
> +
> +	gpu_metrics->current_gfxclk = metrics.ClockFrequency[CLOCK_GFXCLK];
> +	gpu_metrics->current_socclk = metrics.ClockFrequency[CLOCK_SOCCLK];
> +	gpu_metrics->current_uclk = metrics.ClockFrequency[CLOCK_UMCCLK];
> +	gpu_metrics->current_fclk = metrics.ClockFrequency[CLOCK_FCLK];
> +	gpu_metrics->current_vclk = metrics.ClockFrequency[CLOCK_VCLK];
> +	gpu_metrics->current_dclk = metrics.ClockFrequency[CLOCK_DCLK];
> +	memcpy(&gpu_metrics->current_coreclk[0],
> +		&metrics.CoreFrequency[0],
> +		sizeof(uint16_t) * 8);
> +	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
> +	gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
> +
> +	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
> +
> +	gpu_metrics->fan_pwm = metrics.FanPwm;
> +
> +	*table = (void *)gpu_metrics;
> +
> +	return sizeof(struct gpu_metrics_v2_0);
> +}
> +
>   static const struct pptable_funcs renoir_ppt_funcs = {
>   	.set_power_state = NULL,
>   	.print_clk_levels = renoir_print_clk_levels,
> @@ -1029,6 +1102,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
>   	.is_dpm_running = renoir_is_dpm_running,
>   	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
>   	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
> +	.get_gpu_metrics = renoir_get_gpu_metrics,
>   };
>   
>   void renoir_set_ppt_funcs(struct smu_context *smu)
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> index 31456437bb18..660f403d5770 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> @@ -274,3 +274,15 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
>   
>   	return ret;
>   }
> +
> +void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
> +{
> +	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
> +
> +	gpu_metrics->common_header.structure_size =
> +				sizeof(struct gpu_metrics_v2_0);
> +	gpu_metrics->common_header.format_revision = 2;
> +	gpu_metrics->common_header.content_revision = 0;
> +
> +	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
> +}
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread
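
Both metrics layouts begin with the same metrics_table_header, so a consumer
can dispatch on format_revision: 1 selects the dGPU gpu_metrics_v1_0 layout,
2 the APU gpu_metrics_v2_0 layout initialized above. A minimal consumer-side
sketch (illustrative only, not part of the series):

/* Illustrative dispatch on the common header of a gpu metrics blob. */
static void parse_gpu_metrics_sketch(const void *buf, size_t len)
{
	const struct metrics_table_header *hdr = buf;

	if (len < sizeof(*hdr) || len < hdr->structure_size)
		return;	/* truncated or malformed blob */

	switch (hdr->format_revision) {
	case 1:	/* struct gpu_metrics_v1_0: dGPU layout */
		break;
	case 2:	/* struct gpu_metrics_v2_0: APU layout */
		break;
	default:	/* unknown revision: ignore */
		break;
	}
}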

* Re: [PATCH 11/17] drm/amd/powerplay: add Vega12 support for gpu metrics export
  2020-07-31  2:43 ` [PATCH 11/17] drm/amd/powerplay: add Vega12 " Evan Quan
@ 2020-08-04 20:40   ` Alex Deucher
  0 siblings, 0 replies; 21+ messages in thread
From: Alex Deucher @ 2020-08-04 20:40 UTC (permalink / raw)
  To: Evan Quan
  Cc: Deucher, Alexander, Kuehling, Felix, Harish Kasiviswanathan,
	Nirmoy, amd-gfx list

Patches 9-11 are:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

On Thu, Jul 30, 2020 at 10:44 PM Evan Quan <evan.quan@amd.com> wrote:
>
> Add Vega12 gpu metrics export interface.
>
> Change-Id: I2c910f523049f0f90eecb8d74cb73ebb39a22bd9
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 111 ++++++++++++++++++
>  .../drm/amd/powerplay/hwmgr/vega12_hwmgr.h    |   1 +
>  2 files changed, 112 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index a678a67f1c0d..40bb0c2e4e8c 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -47,6 +47,13 @@
>  #include "pp_thermal.h"
>  #include "vega12_baco.h"
>
> +#define smnPCIE_LC_SPEED_CNTL                  0x11140290
> +#define smnPCIE_LC_LINK_WIDTH_CNTL             0x11140288
> +
> +#define LINK_WIDTH_MAX                         6
> +#define LINK_SPEED_MAX                         3
> +static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> +static int link_speed[] = {25, 50, 80, 160};
>
>  static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
>                 enum pp_clock_type type, uint32_t mask);
> @@ -2095,6 +2102,46 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
>         return 0;
>  }
>
> +static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
> +{
> +       struct amdgpu_device *adev = hwmgr->adev;
> +
> +       return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
> +               PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
> +               >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> +}
> +
> +static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> +{
> +       uint32_t width_level;
> +
> +       width_level = vega12_get_current_pcie_link_width_level(hwmgr);
> +       if (width_level > LINK_WIDTH_MAX)
> +               width_level = 0;
> +
> +       return link_width[width_level];
> +}
> +
> +static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
> +{
> +       struct amdgpu_device *adev = hwmgr->adev;
> +
> +       return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
> +               PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
> +               >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> +}
> +
> +static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> +{
> +       uint32_t speed_level;
> +
> +       speed_level = vega12_get_current_pcie_link_speed_level(hwmgr);
> +       if (speed_level > LINK_SPEED_MAX)
> +               speed_level = 0;
> +
> +       return link_speed[speed_level];
> +}
> +
>  static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
>                 enum pp_clock_type type, char *buf)
>  {
> @@ -2682,6 +2729,69 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
>         return 0;
>  }
>
> +static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
> +{
> +       memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
> +
> +       gpu_metrics->common_header.structure_size =
> +                               sizeof(struct gpu_metrics_v1_0);
> +       gpu_metrics->common_header.format_revision = 1;
> +       gpu_metrics->common_header.content_revision = 0;
> +
> +       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
> +}
> +
> +static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
> +                                     void **table)
> +{
> +       struct vega12_hwmgr *data =
> +                       (struct vega12_hwmgr *)(hwmgr->backend);
> +       struct gpu_metrics_v1_0 *gpu_metrics =
> +                       &data->gpu_metrics_table;
> +       SmuMetrics_t metrics;
> +       uint32_t fan_speed_rpm;
> +       int ret;
> +
> +       ret = vega12_get_metrics_table(hwmgr, &metrics);
> +       if (ret)
> +               return ret;
> +
> +       vega12_init_gpu_metrics_v1_0(gpu_metrics);
> +
> +       gpu_metrics->temperature_edge = metrics.TemperatureEdge;
> +       gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
> +       gpu_metrics->temperature_mem = metrics.TemperatureHBM;
> +       gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
> +       gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
> +
> +       gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
> +       gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
> +
> +       gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
> +       gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
> +       gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
> +
> +       gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
> +       gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
> +       gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
> +       gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
> +       gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
> +
> +       gpu_metrics->throttle_status = metrics.ThrottlerStatus;
> +
> +       vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
> +       gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
> +
> +       gpu_metrics->pcie_link_width =
> +                       vega12_get_current_pcie_link_width(hwmgr);
> +       gpu_metrics->pcie_link_speed =
> +                       vega12_get_current_pcie_link_speed(hwmgr);
> +
> +       *table = (void *)gpu_metrics;
> +
> +       return sizeof(struct gpu_metrics_v1_0);
> +}
> +
>  static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .backend_init = vega12_hwmgr_backend_init,
>         .backend_fini = vega12_hwmgr_backend_fini,
> @@ -2739,6 +2849,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
>         .get_ppfeature_status = vega12_get_ppfeature_status,
>         .set_ppfeature_status = vega12_set_ppfeature_status,
>         .set_mp1_state = vega12_set_mp1_state,
> +       .get_gpu_metrics = vega12_get_gpu_metrics,
>  };
>
>  int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> index 73875399666a..aa63ae41942d 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
> @@ -399,6 +399,7 @@ struct vega12_hwmgr {
>
>         unsigned long                  metrics_time;
>         SmuMetrics_t                   metrics_table;
> +       struct gpu_metrics_v1_0        gpu_metrics_table;
>  };
>
>  #define VEGA12_DPM2_NEAR_TDP_DEC                      10
> --
> 2.28.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12
  2020-07-31  2:43 ` [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12 Evan Quan
@ 2020-08-04 20:41   ` Alex Deucher
  2020-08-05  3:14     ` Quan, Evan
  0 siblings, 1 reply; 21+ messages in thread
From: Alex Deucher @ 2020-08-04 20:41 UTC (permalink / raw)
  To: Evan Quan
  Cc: Deucher, Alexander, Kuehling, Felix, Harish Kasiviswanathan,
	Nirmoy, amd-gfx list

Do we want the metrics cache at all? I can see arguments both ways.
Patches 12-17 are:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

On Thu, Jul 30, 2020 at 10:45 PM Evan Quan <evan.quan@amd.com> wrote:
>
> For the gpu metrics export, the metrics cache makes no sense. It's up to the
> user to decide how often the metrics should be retrieved.
>
> Change-Id: Ic2a27ebc90f0a7cf581d0697c121b6d7df030f3b
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 29 ++++++++++++-------
>  1 file changed, 18 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 40bb0c2e4e8c..c70c30175801 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1262,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
>         return (mem_clk * 100);
>  }
>
> -static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
> +static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
> +                                   SmuMetrics_t *metrics_table,
> +                                   bool bypass_cache)
>  {
>         struct vega12_hwmgr *data =
>                         (struct vega12_hwmgr *)(hwmgr->backend);
>         int ret = 0;
>
> -       if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
> -               ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
> -                               TABLE_SMU_METRICS, true);
> +       if (bypass_cache ||
> +           !data->metrics_time ||
> +           time_after(jiffies, data->metrics_time + HZ / 2)) {
> +               ret = smum_smc_table_manager(hwmgr,
> +                                            (uint8_t *)(&data->metrics_table),
> +                                            TABLE_SMU_METRICS,
> +                                            true);
>                 if (ret) {
>                         pr_info("Failed to export SMU metrics table!\n");
>                         return ret;
>                 }
> -               memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
>                 data->metrics_time = jiffies;
> -       } else
> +       }
> +
> +       if (metrics_table)
>                 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
>
>         return ret;
> @@ -1288,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
>         SmuMetrics_t metrics_table;
>         int ret = 0;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>         if (ret)
>                 return ret;
>
> @@ -1339,7 +1346,7 @@ static int vega12_get_current_activity_percent(
>         SmuMetrics_t metrics_table;
>         int ret = 0;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>         if (ret)
>                 return ret;
>
> @@ -1387,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
>                 *size = 4;
>                 break;
>         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
> -               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>                 if (ret)
>                         return ret;
>
> @@ -1396,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
>                 *size = 4;
>                 break;
>         case AMDGPU_PP_SENSOR_MEM_TEMP:
> -               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>                 if (ret)
>                         return ret;
>
> @@ -2752,7 +2759,7 @@ static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
>         uint32_t fan_speed_rpm;
>         int ret;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics, true);
>         if (ret)
>                 return ret;
>
> --
> 2.28.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

* RE: [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12
  2020-08-04 20:41   ` Alex Deucher
@ 2020-08-05  3:14     ` Quan, Evan
  0 siblings, 0 replies; 21+ messages in thread
From: Quan, Evan @ 2020-08-05  3:14 UTC (permalink / raw)
  To: Alex Deucher
  Cc: Deucher, Alexander, Kuehling, Felix, Das, Nirmoy,
	Kasiviswanathan, Harish, amd-gfx list


The cache is useful for cases like the sysfs "amdgpu_pm_info" interface, which queries many metrics in a very short period.
Without the cache, multiple table transfers would be triggered (unnecessary, as the PMFW sample interval is 1 ms).

What is unreasonable in our driver is the cache interval. For this particular ASIC (Vega12), 0.5 s is used, which I think is too big.
It should not be bigger than the PMFW sampling interval setting (1 ms); otherwise we may get outdated data.
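
As a minimal sketch of what that would look like (hypothetical, not part of this series), the cache window in vega12_get_metrics_table() could be derived from the PMFW sampling interval instead of the hardcoded HZ / 2; note that msecs_to_jiffies() rounds up to at least one jiffy, so on a HZ=250 kernel the effective window would be 4 ms rather than 1 ms:

	/* Hypothetical: cache window tied to the assumed 1 ms PMFW sampling
	 * interval instead of HZ / 2 (0.5 s). msecs_to_jiffies() rounds up
	 * to at least one jiffy, so the real window is HZ-dependent.
	 */
	#define VEGA12_METRICS_CACHE_MS	1	/* assumed PMFW sample interval */

	if (bypass_cache ||
	    !data->metrics_time ||
	    time_after(jiffies, data->metrics_time +
			msecs_to_jiffies(VEGA12_METRICS_CACHE_MS))) {
		/* refresh data->metrics_table from the SMU as in the patch */
	}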

BR
Evan
-----Original Message-----
From: Alex Deucher <alexdeucher@gmail.com>
Sent: Wednesday, August 5, 2020 4:42 AM
To: Quan, Evan <Evan.Quan@amd.com>
Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>; Deucher, Alexander <Alexander.Deucher@amd.com>; Kuehling, Felix <Felix.Kuehling@amd.com>; Kasiviswanathan, Harish <Harish.Kasiviswanathan@amd.com>; Das, Nirmoy <Nirmoy.Das@amd.com>
Subject: Re: [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12

Do we want the metrics cache at all? I can see arguments both ways.
Patches 12-17 are:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

On Thu, Jul 30, 2020 at 10:45 PM Evan Quan <evan.quan@amd.com> wrote:
>
> As for the gpu metrics export, the metrics cache makes no sense. It's up
> to the user to decide how often the metrics should be retrieved.
>
> Change-Id: Ic2a27ebc90f0a7cf581d0697c121b6d7df030f3b
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 29 ++++++++++++-------
>  1 file changed, 18 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 40bb0c2e4e8c..c70c30175801 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1262,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
>         return (mem_clk * 100);
>  }
>
> -static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
> +static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
> +                                   SmuMetrics_t *metrics_table,
> +                                   bool bypass_cache)
>  {
>         struct vega12_hwmgr *data =
>                         (struct vega12_hwmgr *)(hwmgr->backend);
>         int ret = 0;
>
> -       if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
> -               ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
> -                               TABLE_SMU_METRICS, true);
> +       if (bypass_cache ||
> +           !data->metrics_time ||
> +           time_after(jiffies, data->metrics_time + HZ / 2)) {
> +               ret = smum_smc_table_manager(hwmgr,
> +                                            (uint8_t *)(&data->metrics_table),
> +                                            TABLE_SMU_METRICS,
> +                                            true);
>                 if (ret) {
>                         pr_info("Failed to export SMU metrics table!\n");
>                         return ret;
>                 }
> -               memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
>                 data->metrics_time = jiffies;
> -       } else
> +       }
> +
> +       if (metrics_table)
>                 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
>
>         return ret;
> @@ -1288,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
>         SmuMetrics_t metrics_table;
>         int ret = 0;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>         if (ret)
>                 return ret;
>
> @@ -1339,7 +1346,7 @@ static int vega12_get_current_activity_percent(
>         SmuMetrics_t metrics_table;
>         int ret = 0;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>         if (ret)
>                 return ret;
>
> @@ -1387,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
>                 *size = 4;
>                 break;
>         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
> -               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>                 if (ret)
>                         return ret;
>
> @@ -1396,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
>                 *size = 4;
>                 break;
>         case AMDGPU_PP_SENSOR_MEM_TEMP:
> -               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
> +               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
>                 if (ret)
>                         return ret;
>
> @@ -2752,7 +2759,7 @@ static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
>         uint32_t fan_speed_rpm;
>         int ret;
>
> -       ret = vega12_get_metrics_table(hwmgr, &metrics);
> +       ret = vega12_get_metrics_table(hwmgr, &metrics, true);
>         if (ret)
>                 return ret;
>
> --
> 2.28.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2020-08-05  3:14 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
2020-07-31  2:43 ` [PATCH 02/17] drm/amd/powerplay: add new sysfs interface for retrieving gpu metrics(V2) Evan Quan
2020-07-31  2:43 ` [PATCH 03/17] drm/amd/powerplay: implement SMU V11 common APIs for retrieving link speed/width Evan Quan
2020-07-31  2:43 ` [PATCH 04/17] drm/amd/powerplay: add Arcturus support for gpu metrics export Evan Quan
2020-07-31  2:43 ` [PATCH 05/17] drm/amd/powerplay: update the data structure for NV12 SmuMetrics Evan Quan
2020-07-31  2:43 ` [PATCH 06/17] drm/amd/powerplay: add Navi1x support for gpu metrics export Evan Quan
2020-07-31  2:43 ` [PATCH 07/17] drm/amd/powerplay: add Sienna Cichlid " Evan Quan
2020-07-31  2:43 ` [PATCH 08/17] drm/amd/powerplay: add Renoir support for gpu metrics export(V2) Evan Quan
2020-07-31 14:41   ` Nirmoy
2020-07-31  2:43 ` [PATCH 09/17] drm/amd/powerplay: enable gpu_metrics export on legacy powerplay routines Evan Quan
2020-07-31  2:43 ` [PATCH 10/17] drm/amd/powerplay: add Vega20 support for gpu metrics export Evan Quan
2020-07-31  2:43 ` [PATCH 11/17] drm/amd/powerplay: add Vega12 " Evan Quan
2020-08-04 20:40   ` Alex Deucher
2020-07-31  2:43 ` [PATCH 12/17] drm/amd/powerplay: add control method to bypass metrics cache on Arcturus Evan Quan
2020-07-31  2:43 ` [PATCH 13/17] drm/amd/powerplay: add control method to bypass metrics cache on Navi10 Evan Quan
2020-07-31  2:43 ` [PATCH 14/17] drm/amd/powerplay: add control method to bypass metrics cache on Sienna Cichlid Evan Quan
2020-07-31  2:43 ` [PATCH 15/17] drm/amd/powerplay: add control method to bypass metrics cache on Renoir Evan Quan
2020-07-31  2:43 ` [PATCH 16/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega20 Evan Quan
2020-07-31  2:43 ` [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12 Evan Quan
2020-08-04 20:41   ` Alex Deucher
2020-08-05  3:14     ` Quan, Evan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).