From: Evan Quan <evan.quan@amd.com>
To: amd-gfx@lists.freedesktop.org
Cc: alexander.deucher@amd.com, Felix.Kuehling@amd.com,
	Evan Quan <evan.quan@amd.com>,
	Harish.Kasiviswanathan@amd.com, nirmodas@amd.com
Subject: [PATCH 10/17] drm/amd/powerplay: add Vega20 support for gpu metrics export
Date: Fri, 31 Jul 2020 10:43:09 +0800
Message-ID: <20200731024316.28324-10-evan.quan@amd.com>
In-Reply-To: <20200731024316.28324-1-evan.quan@amd.com>

Add the Vega20 implementation of the gpu_metrics export interface:
populate a gpu_metrics_v1_0 table from the cached SmuMetrics data
(temperatures, activity, power, clocks, fan speed and throttler
status), and factor the current PCIe link width/speed reads out of
vega20_print_clock_levels() into reusable helpers.

Change-Id: I7b4ab850358cc6d7455889d9031a7111cba35ebd
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
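[Reviewer note, not part of the commit message: below is a rough
sketch of how userspace might consume the exported blob. The sysfs
path is assumed from patch 02/17 and the header layout from patch
01/17 of this series; both are assumptions for illustration, not
interfaces defined by this patch.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Header layout assumed from patch 01/17 of this series. */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;	/* 1 for gpu_metrics_v1_0 */
	uint8_t  content_revision;	/* 0 for gpu_metrics_v1_0 */
};

int main(void)
{
	/* Node path assumed from patch 02/17; adjust cardN as needed. */
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
	struct metrics_table_header hdr;

	if (!f)
		return EXIT_FAILURE;
	if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("structure_size=%u format=v%u content=v%u\n",
	       hdr.structure_size, hdr.format_revision,
	       hdr.content_revision);
	/*
	 * The metric fields follow the header.  The driver memsets the
	 * whole table to 0xFF before filling it, so any field the ASIC
	 * cannot report reads back as all-ones rather than a bogus zero.
	 */
	return EXIT_SUCCESS;
}
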
 .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c    | 123 +++++++++++++++++-
 .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h    |   1 +
 2 files changed, 117 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index bacbe2fa1f9a..037bebda2eae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -55,6 +55,11 @@
 #define smnPCIE_LC_SPEED_CNTL			0x11140290
 #define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
 
+#define LINK_WIDTH_MAX				6
+#define LINK_SPEED_MAX				3
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
+
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data =
@@ -3265,6 +3270,46 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 	return 0;
 }
 
+static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+{
+	uint32_t width_level;
+
+	width_level = vega20_get_current_pcie_link_width_level(hwmgr);
+	if (width_level > LINK_WIDTH_MAX)
+		width_level = 0;
+
+	return link_width[width_level];
+}
+
+static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speed_level;
+
+	speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
+	if (speed_level > LINK_SPEED_MAX)
+		speed_level = 0;
+
+	return link_speed[speed_level];
+}
+
 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {
@@ -3277,7 +3322,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v3_information *pptable_information =
 		(struct phm_ppt_v3_information *)hwmgr->pptable;
 	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
-	struct amdgpu_device *adev = hwmgr->adev;
 	struct pp_clock_levels_with_latency clocks;
 	struct vega20_single_dpm_table *fclk_dpm_table =
 			&(data->dpm_table.fclk_table);
@@ -3371,12 +3415,10 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 
 	case PP_PCIE:
-		current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
-			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
-			    >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-		current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
-			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
-			    >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+		current_gen_speed =
+			vega20_get_current_pcie_link_speed_level(hwmgr);
+		current_lane_width =
+			vega20_get_current_pcie_link_width_level(hwmgr);
 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
 			if (i == 1 && data->pcie_parameters_override) {
 				gen_speed = data->pcie_gen_level1;
@@ -4218,6 +4260,72 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
 	return ret;
 }
 
+static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
+	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+	gpu_metrics->common_header.structure_size =
+				sizeof(struct gpu_metrics_v1_0);
+	gpu_metrics->common_header.format_revision = 1;
+	gpu_metrics->common_header.content_revision = 0;
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
+				      void **table)
+{
+	struct vega20_hwmgr *data =
+			(struct vega20_hwmgr *)(hwmgr->backend);
+	struct gpu_metrics_v1_0 *gpu_metrics =
+			&data->gpu_metrics_table;
+	SmuMetrics_t metrics;
+	uint32_t fan_speed_rpm;
+	int ret;
+
+	ret = vega20_get_metrics_table(hwmgr, &metrics);
+	if (ret)
+		return ret;
+
+	vega20_init_gpu_metrics_v1_0(gpu_metrics);
+
+	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+	vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
+	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
+
+	gpu_metrics->pcie_link_width =
+			vega20_get_current_pcie_link_width(hwmgr);
+	gpu_metrics->pcie_link_speed =
+			vega20_get_current_pcie_link_speed(hwmgr);
+
+	*table = (void *)gpu_metrics;
+
+	return sizeof(struct gpu_metrics_v1_0);
+}
+
 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	/* init/fini related */
 	.backend_init = vega20_hwmgr_backend_init,
@@ -4288,6 +4396,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
 	.set_df_cstate = vega20_set_df_cstate,
 	.set_xgmi_pstate = vega20_set_xgmi_pstate,
+	.get_gpu_metrics = vega20_get_gpu_metrics,
 };
 
 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 2c3125f82b24..075c0094da9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -527,6 +527,7 @@ struct vega20_hwmgr {
 
 	unsigned long                  metrics_time;
 	SmuMetrics_t                   metrics_table;
+	struct gpu_metrics_v1_0        gpu_metrics_table;
 
 	bool                           pcie_parameters_override;
 	uint32_t                       pcie_gen_level1;
-- 
2.28.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Thread overview: 21+ messages
2020-07-31  2:43 [PATCH 01/17] drm/amd/powerplay: define an universal data structure for gpu metrics (V4) Evan Quan
2020-07-31  2:43 ` [PATCH 02/17] drm/amd/powerplay: add new sysfs interface for retrieving gpu metrics(V2) Evan Quan
2020-07-31  2:43 ` [PATCH 03/17] drm/amd/powerplay: implement SMU V11 common APIs for retrieving link speed/width Evan Quan
2020-07-31  2:43 ` [PATCH 04/17] drm/amd/powerplay: add Arcturus support for gpu metrics export Evan Quan
2020-07-31  2:43 ` [PATCH 05/17] drm/amd/powerplay: update the data structure for NV12 SmuMetrics Evan Quan
2020-07-31  2:43 ` [PATCH 06/17] drm/amd/powerplay: add Navi1x support for gpu metrics export Evan Quan
2020-07-31  2:43 ` [PATCH 07/17] drm/amd/powerplay: add Sienna Cichlid " Evan Quan
2020-07-31  2:43 ` [PATCH 08/17] drm/amd/powerplay: add Renoir support for gpu metrics export(V2) Evan Quan
2020-07-31 14:41   ` Nirmoy
2020-07-31  2:43 ` [PATCH 09/17] drm/amd/powerplay: enable gpu_metrics export on legacy powerplay routines Evan Quan
2020-07-31  2:43 ` [PATCH 10/17] drm/amd/powerplay: add Vega20 support for gpu metrics export Evan Quan [this message]
2020-07-31  2:43 ` [PATCH 11/17] drm/amd/powerplay: add Vega12 support for gpu metrics export Evan Quan
2020-08-04 20:40   ` Alex Deucher
2020-07-31  2:43 ` [PATCH 12/17] drm/amd/powerplay: add control method to bypass metrics cache on Arcturus Evan Quan
2020-07-31  2:43 ` [PATCH 13/17] drm/amd/powerplay: add control method to bypass metrics cache on Navi10 Evan Quan
2020-07-31  2:43 ` [PATCH 14/17] drm/amd/powerplay: add control method to bypass metrics cache on Sienna Cichlid Evan Quan
2020-07-31  2:43 ` [PATCH 15/17] drm/amd/powerplay: add control method to bypass metrics cache on Renoir Evan Quan
2020-07-31  2:43 ` [PATCH 16/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega20 Evan Quan
2020-07-31  2:43 ` [PATCH 17/17] drm/amd/powerplay: add control method to bypass metrics cache on Vega12 Evan Quan
2020-08-04 20:41   ` Alex Deucher
2020-08-05  3:14     ` Quan, Evan
