* [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory
@ 2019-12-31  2:48 Evan Quan
  2019-12-31  2:48 ` [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU Evan Quan
  2020-01-01 14:20 ` [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory Alex Deucher
  0 siblings, 2 replies; 5+ messages in thread
From: Evan Quan @ 2019-12-31  2:48 UTC (permalink / raw)
  To: amd-gfx; +Cc: alexander.deucher, Evan Quan

So that we do not need to allocate a piece of VRAM for it. This
is preparation for a coming change which unifies the VRAM address
used for all driver table interactions with the SMU.
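
The resulting pattern per ASIC is roughly as follows (a simplified
sketch of the hunks below; error handling trimmed, and the
<asic>_tables_init naming is a placeholder):

	/* <asic>_tables_init(): the cached copy lives in system memory */
	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		return -ENOMEM;

	/* smu_write_watermarks_table(): hand the cached copy to the
	 * generic table updater, which does the actual transfer */
	return smu_update_table(smu, SMU_TABLE_WATERMARKS, 0,
				smu->smu_table.watermarks_table, true);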

Change-Id: I967f960a10a19957ea7301aa40a8ced0c036ad68
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 21 +++++++++----------
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  1 +
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  4 ++++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  4 ++++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c     |  2 ++
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  4 ++++
 6 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f9bf69cd72a5..09e16183a769 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1899,26 +1899,25 @@ int smu_set_df_cstate(struct smu_context *smu,
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-	int ret = 0;
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *table = NULL;
-
-	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+	void *watermarks_table = smu->smu_table.watermarks_table;
 
-	if (!table->cpu_addr)
+	if (!watermarks_table)
 		return -EINVAL;
 
-	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+	return smu_update_table(smu,
+				SMU_TABLE_WATERMARKS,
+				0,
+				watermarks_table,
 				true);
-
-	return ret;
 }
 
 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
-	void *table = watermarks->cpu_addr;
+	void *table = smu->smu_table.watermarks_table;
+
+	if (!table)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 541cfde289ea..30da8328d1bc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -254,6 +254,7 @@ struct smu_table_context
 	unsigned long			metrics_time;
 	void				*metrics_table;
 	void				*clocks_table;
+	void				*watermarks_table;
 
 	void				*max_sustainable_clocks;
 	struct smu_bios_boot_up_values	boot_values;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index e7ab8caee222..3387f3243a01 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -555,6 +555,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e73644beffd9..506cc6bf4bc0 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -209,6 +209,10 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a85826471d82..6fb93eb6ab39 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 
 	kfree(smu_table->tables);
 	kfree(smu_table->metrics_table);
+	kfree(smu_table->watermarks_table);
 	smu_table->tables = NULL;
 	smu_table->metrics_table = NULL;
+	smu_table->watermarks_table = NULL;
 	smu_table->metrics_time = 0;
 
 	ret = smu_v11_0_fini_dpm_context(smu);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 534c46bc0146..27bdcdeb08d9 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
 		return -ENOMEM;
 	smu_table->metrics_time = 0;
 
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		return -ENOMEM;
+
 	return 0;
 }
 
-- 
2.24.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU
  2019-12-31  2:48 [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory Evan Quan
@ 2019-12-31  2:48 ` Evan Quan
  2020-01-01 14:19   ` Alex Deucher
  2020-01-01 14:20 ` [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory Alex Deucher
  1 sibling, 1 reply; 5+ messages in thread
From: Evan Quan @ 2019-12-31  2:48 UTC (permalink / raw)
  To: amd-gfx; +Cc: alexander.deucher, Evan Quan

With this, we can avoid passing in the VRAM address on every table
transfer. That puts extra, unnecessary traffic on the SMU in some
cases (e.g. polling the amdgpu_pm_info sysfs interface).
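
The resulting message flow is roughly as follows (a simplified sketch
based on the hunks below; the exact transfer argument encoding is
elided):

	/* once, at hw init, via smu_set_driver_table_location() */
	smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
				    upper_32_bits(driver_table->mc_address));
	smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
				    lower_32_bits(driver_table->mc_address));

	/* per transfer, in smu_update_table(): one message instead of three */
	smu_send_smc_msg_with_param(smu, drv2smu ?
				    SMU_MSG_TransferTableDram2Smu :
				    SMU_MSG_TransferTableSmu2Dram,
				    table_id /* plus the transfer argument */);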

Change-Id: Ifb74d9cd89790b301e88d472b29cdb9b0365b65a
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 98 ++++++++++++-------
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |  3 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  2 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 +
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  1 +
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  1 +
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 +
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c     | 18 ++++
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c     | 26 +++--
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  1 +
 11 files changed, 109 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 09e16183a769..290976f5f6c2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -490,26 +490,19 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct amdgpu_device *adev = smu->adev;
-	struct smu_table *table = NULL;
-	int ret = 0;
+	struct smu_table *table = &smu_table->driver_table;
 	int table_id = smu_table_get_index(smu, table_index);
+	uint32_t table_size;
+	int ret = 0;
 
 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
 		return -EINVAL;
 
-	table = &smu_table->tables[table_index];
+	table_size = smu_table->tables[table_index].size;
 
 	if (drv2smu)
-		memcpy(table->cpu_addr, table_data, table->size);
+		memcpy(table->cpu_addr, table_data, table_size);
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
-					  upper_32_bits(table->mc_address));
-	if (ret)
-		return ret;
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
-					  lower_32_bits(table->mc_address));
-	if (ret)
-		return ret;
 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 					  SMU_MSG_TransferTableDram2Smu :
 					  SMU_MSG_TransferTableSmu2Dram,
@@ -521,7 +514,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	if (!drv2smu)
-		memcpy(table_data, table->cpu_addr, table->size);
+		memcpy(table_data, table->cpu_addr, table_size);
 
 	return ret;
 }
@@ -948,32 +941,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
+	struct smu_table *driver_table = &(smu_table->driver_table);
+	uint32_t max_table_size = 0;
 	int ret, i;
 
-	for (i = 0; i < SMU_TABLE_COUNT; i++) {
-		if (tables[i].size == 0)
-			continue;
+	/* VRAM allocation for tool table */
+	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
 		ret = amdgpu_bo_create_kernel(adev,
-					      tables[i].size,
-					      tables[i].align,
-					      tables[i].domain,
-					      &tables[i].bo,
-					      &tables[i].mc_address,
-					      &tables[i].cpu_addr);
-		if (ret)
-			goto failed;
+					      tables[SMU_TABLE_PMSTATUSLOG].size,
+					      tables[SMU_TABLE_PMSTATUSLOG].align,
+					      tables[SMU_TABLE_PMSTATUSLOG].domain,
+					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+		if (ret) {
+			pr_err("VRAM allocation for tool table failed!\n");
+			return ret;
+		}
 	}
 
-	return 0;
-failed:
-	while (--i >= 0) {
+	/* VRAM allocation for driver table */
+	for (i = 0; i < SMU_TABLE_COUNT; i++) {
 		if (tables[i].size == 0)
 			continue;
-		amdgpu_bo_free_kernel(&tables[i].bo,
-				      &tables[i].mc_address,
-				      &tables[i].cpu_addr);
 
+		if (i == SMU_TABLE_PMSTATUSLOG)
+			continue;
+
+		if (max_table_size < tables[i].size)
+			max_table_size = tables[i].size;
+	}
+
+	driver_table->size = max_table_size;
+	driver_table->align = PAGE_SIZE;
+	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+	ret = amdgpu_bo_create_kernel(adev,
+				      driver_table->size,
+				      driver_table->align,
+				      driver_table->domain,
+				      &driver_table->bo,
+				      &driver_table->mc_address,
+				      &driver_table->cpu_addr);
+	if (ret) {
+		pr_err("VRAM allocation for driver table failed!\n");
+		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
 	}
+
 	return ret;
 }
 
@@ -981,18 +998,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
-	uint32_t i = 0;
+	struct smu_table *driver_table = &(smu_table->driver_table);
 
 	if (!tables)
 		return 0;
 
-	for (i = 0; i < SMU_TABLE_COUNT; i++) {
-		if (tables[i].size == 0)
-			continue;
-		amdgpu_bo_free_kernel(&tables[i].bo,
-				      &tables[i].mc_address,
-				      &tables[i].cpu_addr);
-	}
+	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+	amdgpu_bo_free_kernel(&driver_table->bo,
+			      &driver_table->mc_address,
+			      &driver_table->cpu_addr);
 
 	return 0;
 }
@@ -1063,6 +1081,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 
 	/* smu_dump_pptable(smu); */
 
+	ret = smu_set_driver_table_location(smu);
+	if (ret)
+		return ret;
+
 	/*
 	 * Copy pptable bo in the vram to smc with SMU MSGs such as
 	 * SetDriverDramAddr and TransferTableDram2Smu.
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 50b317f4b1e6..064b5201a8a7 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2022,7 +2022,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
 	SwI2cRequest_t req;
 	struct amdgpu_device *adev = to_amdgpu_device(control);
 	struct smu_table_context *smu_table = &adev->smu.smu_table;
-	struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+	struct smu_table *table = &smu_table->driver_table;
 
 	memset(&req, 0, sizeof(req));
 	arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2261,6 +2261,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 30da8328d1bc..121bf81eced5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -260,6 +260,7 @@ struct smu_table_context
 	struct smu_bios_boot_up_values	boot_values;
 	void                            *driver_pptable;
 	struct smu_table		*tables;
+	struct smu_table		driver_table;
 	struct smu_table		memory_pool;
 	uint8_t                         thermal_controller_type;
 
@@ -498,6 +499,7 @@ struct pptable_funcs {
 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
 	int (*write_pptable)(struct smu_context *smu);
 	int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+	int (*set_driver_table_location)(struct smu_context *smu);
 	int (*set_tool_table_location)(struct smu_context *smu);
 	int (*notify_memory_pool_location)(struct smu_context *smu);
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index db3f78676aeb..662989296174 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
 
 int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu);
 
 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 3f1cd06e273c..d79e54b5ebf6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
 
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3387f3243a01..aa206bde619b 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2120,6 +2120,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 506cc6bf4bc0..861e6410363b 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -920,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
 	.mode2_reset = smu_v12_0_mode2_reset,
 	.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+	.set_driver_table_location = smu_v12_0_set_driver_table_location,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 77864e4236c4..783319ec8bf9 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -61,6 +61,8 @@
 	((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
 #define smu_set_min_dcef_deep_sleep(smu) \
 	((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+	((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
 #define smu_set_tool_table_location(smu) \
 	((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
 #define smu_notify_memory_pool_location(smu) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 6fb93eb6ab39..e804f9854027 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -776,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
 }
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+	struct smu_table *driver_table = &smu->smu_table.driver_table;
+	int ret = 0;
+
+	if (driver_table->mc_address) {
+		ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrHigh,
+				upper_32_bits(driver_table->mc_address));
+		if (!ret)
+			ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrLow,
+				lower_32_bits(driver_table->mc_address));
+	}
+
+	return ret;
+}
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 {
 	int ret = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 2ac7f2f231b6..2b1ef9eb0db6 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
 int smu_v12_0_populate_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *table = NULL;
-
-	table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
-	if (!table)
-		return -EINVAL;
-
-	if (!table->cpu_addr)
-		return -EINVAL;
 
 	return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
 }
@@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 
 	return ret;
 }
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+	struct smu_table *driver_table = &smu->smu_table.driver_table;
+	int ret = 0;
+
+	if (driver_table->mc_address) {
+		ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrHigh,
+				upper_32_bits(driver_table->mc_address));
+		if (!ret)
+			ret = smu_send_smc_msg_with_param(smu,
+				SMU_MSG_SetDriverDramAddrLow,
+				lower_32_bits(driver_table->mc_address));
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 27bdcdeb08d9..38febd5ca4da 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3236,6 +3236,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_driver_table_location = smu_v11_0_set_driver_table_location,
 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
-- 
2.24.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* Re: [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU
  2019-12-31  2:48 ` [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU Evan Quan
@ 2020-01-01 14:19   ` Alex Deucher
  2020-01-02  2:22     ` Quan, Evan
  0 siblings, 1 reply; 5+ messages in thread
From: Alex Deucher @ 2020-01-01 14:19 UTC (permalink / raw)
  To: Evan Quan; +Cc: Deucher, Alexander, amd-gfx list

On Mon, Dec 30, 2019 at 9:49 PM Evan Quan <evan.quan@amd.com> wrote:
>
> With this, we can avoid passing in the VRAM address on every table
> transfer. That puts extra, unnecessary traffic on the SMU in some
> cases (e.g. polling the amdgpu_pm_info sysfs interface).
>
> Change-Id: Ifb74d9cd89790b301e88d472b29cdb9b0365b65a
> Signed-off-by: Evan Quan <evan.quan@amd.com>
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 98 ++++++++++++-------
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |  3 +-
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  2 +
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
>  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 +
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  1 +
>  drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  1 +
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 +
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c     | 18 ++++
>  drivers/gpu/drm/amd/powerplay/smu_v12_0.c     | 26 +++--
>  drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  1 +
>  11 files changed, 109 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 09e16183a769..290976f5f6c2 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -490,26 +490,19 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
>  {
>         struct smu_table_context *smu_table = &smu->smu_table;
>         struct amdgpu_device *adev = smu->adev;
> -       struct smu_table *table = NULL;
> -       int ret = 0;
> +       struct smu_table *table = &smu_table->driver_table;
>         int table_id = smu_table_get_index(smu, table_index);
> +       uint32_t table_size;
> +       int ret = 0;
>
>         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
>                 return -EINVAL;
>
> -       table = &smu_table->tables[table_index];
> +       table_size = smu_table->tables[table_index].size;
>
>         if (drv2smu)
> -               memcpy(table->cpu_addr, table_data, table->size);
> +               memcpy(table->cpu_addr, table_data, table_size);
>
> -       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
> -                                         upper_32_bits(table->mc_address));
> -       if (ret)
> -               return ret;
> -       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
> -                                         lower_32_bits(table->mc_address));
> -       if (ret)
> -               return ret;
>         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
>                                           SMU_MSG_TransferTableDram2Smu :
>                                           SMU_MSG_TransferTableSmu2Dram,
> @@ -521,7 +514,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
>         adev->nbio.funcs->hdp_flush(adev, NULL);
>
>         if (!drv2smu)
> -               memcpy(table_data, table->cpu_addr, table->size);
> +               memcpy(table_data, table->cpu_addr, table_size);
>
>         return ret;
>  }
> @@ -948,32 +941,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
>         struct amdgpu_device *adev = smu->adev;
>         struct smu_table_context *smu_table = &smu->smu_table;
>         struct smu_table *tables = smu_table->tables;
> +       struct smu_table *driver_table = &(smu_table->driver_table);
> +       uint32_t max_table_size = 0;
>         int ret, i;
>
> -       for (i = 0; i < SMU_TABLE_COUNT; i++) {
> -               if (tables[i].size == 0)
> -                       continue;
> +       /* VRAM allocation for tool table */
> +       if (tables[SMU_TABLE_PMSTATUSLOG].size) {
>                 ret = amdgpu_bo_create_kernel(adev,
> -                                             tables[i].size,
> -                                             tables[i].align,
> -                                             tables[i].domain,
> -                                             &tables[i].bo,
> -                                             &tables[i].mc_address,
> -                                             &tables[i].cpu_addr);
> -               if (ret)
> -                       goto failed;
> +                                             tables[SMU_TABLE_PMSTATUSLOG].size,
> +                                             tables[SMU_TABLE_PMSTATUSLOG].align,
> +                                             tables[SMU_TABLE_PMSTATUSLOG].domain,
> +                                             &tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> +               if (ret) {
> +                       pr_err("VRAM allocation for tool table failed!\n");
> +                       return ret;
> +               }
>         }
>
> -       return 0;
> -failed:
> -       while (--i >= 0) {
> +       /* VRAM allocation for driver table */
> +       for (i = 0; i < SMU_TABLE_COUNT; i++) {
>                 if (tables[i].size == 0)
>                         continue;
> -               amdgpu_bo_free_kernel(&tables[i].bo,
> -                                     &tables[i].mc_address,
> -                                     &tables[i].cpu_addr);
>
> +               if (i == SMU_TABLE_PMSTATUSLOG)
> +                       continue;
> +
> +               if (max_table_size < tables[i].size)
> +                       max_table_size = tables[i].size;
> +       }

It would probably be good to document somewhere that the table bo
allocation is just a staging buffer for uploading tables to and
downloading tables from the SMU.  The SMU actually stores the active
tables in SRAM.  That confused me at first when reviewing this patch.
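
E.g. something like this above the staging-buffer allocation (exact
wording up to you):

	/*
	 * The driver table is only a staging buffer in VRAM, sized to the
	 * largest SMU table, used to shuttle table contents to and from
	 * the SMU.  The SMU keeps the active copies in its own SRAM.
	 */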

> +
> +       driver_table->size = max_table_size;
> +       driver_table->align = PAGE_SIZE;
> +       driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
> +
> +       ret = amdgpu_bo_create_kernel(adev,
> +                                     driver_table->size,
> +                                     driver_table->align,
> +                                     driver_table->domain,
> +                                     &driver_table->bo,
> +                                     &driver_table->mc_address,
> +                                     &driver_table->cpu_addr);
> +       if (ret) {
> +               pr_err("VRAM allocation for driver table failed!\n");
> +               if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> +                       amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
>         }
> +
>         return ret;
>  }
>
> @@ -981,18 +998,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
>  {
>         struct smu_table_context *smu_table = &smu->smu_table;
>         struct smu_table *tables = smu_table->tables;
> -       uint32_t i = 0;
> +       struct smu_table *driver_table = &(smu_table->driver_table);
>
>         if (!tables)
>                 return 0;
>
> -       for (i = 0; i < SMU_TABLE_COUNT; i++) {
> -               if (tables[i].size == 0)
> -                       continue;
> -               amdgpu_bo_free_kernel(&tables[i].bo,
> -                                     &tables[i].mc_address,
> -                                     &tables[i].cpu_addr);
> -       }
> +       if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> +               amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                     &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                     &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> +
> +       amdgpu_bo_free_kernel(&driver_table->bo,
> +                             &driver_table->mc_address,
> +                             &driver_table->cpu_addr);
>
>         return 0;
>  }
> @@ -1063,6 +1081,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
>
>         /* smu_dump_pptable(smu); */
>
> +       ret = smu_set_driver_table_location(smu);
> +       if (ret)
> +               return ret;
> +
>         /*
>          * Copy pptable bo in the vram to smc with SMU MSGs such as
>          * SetDriverDramAddr and TransferTableDram2Smu.
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index 50b317f4b1e6..064b5201a8a7 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2022,7 +2022,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
>         SwI2cRequest_t req;
>         struct amdgpu_device *adev = to_amdgpu_device(control);
>         struct smu_table_context *smu_table = &adev->smu.smu_table;
> -       struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
> +       struct smu_table *table = &smu_table->driver_table;
>
>         memset(&req, 0, sizeof(req));
>         arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
> @@ -2261,6 +2261,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
>         .check_fw_version = smu_v11_0_check_fw_version,
>         .write_pptable = smu_v11_0_write_pptable,
>         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
>         .set_tool_table_location = smu_v11_0_set_tool_table_location,
>         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
>         .system_features_control = smu_v11_0_system_features_control,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 30da8328d1bc..121bf81eced5 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -260,6 +260,7 @@ struct smu_table_context
>         struct smu_bios_boot_up_values  boot_values;
>         void                            *driver_pptable;
>         struct smu_table                *tables;
> +       struct smu_table                driver_table;

This would be a good place to document that the driver table is just a
staging buffer for uploading/downloading content to/from the SMU.  Can
you check that we properly lock the staging buffer in all the call
paths that interact with the SMU?  If we do that, I think we can clean
up and simplify a lot of the other locking we have already.
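
Maybe something like this next to the new member (just a suggestion):

	struct smu_table		*tables;
	/*
	 * Staging buffer for table transfers with the SMU; callers must
	 * hold the appropriate lock while a transfer through it is in
	 * flight.
	 */
	struct smu_table		driver_table;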

>         struct smu_table                memory_pool;
>         uint8_t                         thermal_controller_type;
>
> @@ -498,6 +499,7 @@ struct pptable_funcs {
>         int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
>         int (*write_pptable)(struct smu_context *smu);
>         int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
> +       int (*set_driver_table_location)(struct smu_context *smu);
>         int (*set_tool_table_location)(struct smu_context *smu);
>         int (*notify_memory_pool_location)(struct smu_context *smu);
>         int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index db3f78676aeb..662989296174 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
>
>  int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
>
> +int smu_v11_0_set_driver_table_location(struct smu_context *smu);
> +
>  int smu_v11_0_set_tool_table_location(struct smu_context *smu);
>
>  int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> index 3f1cd06e273c..d79e54b5ebf6 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> @@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
>  int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
>                             uint32_t min, uint32_t max);
>
> +int smu_v12_0_set_driver_table_location(struct smu_context *smu);
> +
>  #endif
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index 3387f3243a01..aa206bde619b 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -2120,6 +2120,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
>         .check_fw_version = smu_v11_0_check_fw_version,
>         .write_pptable = smu_v11_0_write_pptable,
>         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
>         .set_tool_table_location = smu_v11_0_set_tool_table_location,
>         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
>         .system_features_control = smu_v11_0_system_features_control,
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index 506cc6bf4bc0..861e6410363b 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -920,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
>         .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
>         .mode2_reset = smu_v12_0_mode2_reset,
>         .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
> +       .set_driver_table_location = smu_v12_0_set_driver_table_location,
>  };
>
>  void renoir_set_ppt_funcs(struct smu_context *smu)
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> index 77864e4236c4..783319ec8bf9 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
> +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> @@ -61,6 +61,8 @@
>         ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
>  #define smu_set_min_dcef_deep_sleep(smu) \
>         ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
> +#define smu_set_driver_table_location(smu) \
> +       ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
>  #define smu_set_tool_table_location(smu) \
>         ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
>  #define smu_notify_memory_pool_location(smu) \
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index 6fb93eb6ab39..e804f9854027 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -776,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
>         return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
>  }
>
> +int smu_v11_0_set_driver_table_location(struct smu_context *smu)
> +{
> +       struct smu_table *driver_table = &smu->smu_table.driver_table;
> +       int ret = 0;
> +
> +       if (driver_table->mc_address) {
> +               ret = smu_send_smc_msg_with_param(smu,
> +                               SMU_MSG_SetDriverDramAddrHigh,
> +                               upper_32_bits(driver_table->mc_address));
> +               if (!ret)
> +                       ret = smu_send_smc_msg_with_param(smu,
> +                               SMU_MSG_SetDriverDramAddrLow,
> +                               lower_32_bits(driver_table->mc_address));
> +       }
> +
> +       return ret;
> +}
> +
>  int smu_v11_0_set_tool_table_location(struct smu_context *smu)
>  {
>         int ret = 0;
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> index 2ac7f2f231b6..2b1ef9eb0db6 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> @@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
>  int smu_v12_0_populate_smc_tables(struct smu_context *smu)
>  {
>         struct smu_table_context *smu_table = &smu->smu_table;
> -       struct smu_table *table = NULL;
> -
> -       table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
> -       if (!table)
> -               return -EINVAL;
> -
> -       if (!table->cpu_addr)
> -               return -EINVAL;
>
>         return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
>  }
> @@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
>
>         return ret;
>  }
> +
> +int smu_v12_0_set_driver_table_location(struct smu_context *smu)
> +{
> +       struct smu_table *driver_table = &smu->smu_table.driver_table;
> +       int ret = 0;
> +
> +       if (driver_table->mc_address) {
> +               ret = smu_send_smc_msg_with_param(smu,
> +                               SMU_MSG_SetDriverDramAddrHigh,
> +                               upper_32_bits(driver_table->mc_address));
> +               if (!ret)
> +                       ret = smu_send_smc_msg_with_param(smu,
> +                               SMU_MSG_SetDriverDramAddrLow,
> +                               lower_32_bits(driver_table->mc_address));
> +       }
> +
> +       return ret;
> +}
> diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> index 27bdcdeb08d9..38febd5ca4da 100644
> --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> @@ -3236,6 +3236,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
>         .check_fw_version = smu_v11_0_check_fw_version,
>         .write_pptable = smu_v11_0_write_pptable,
>         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
>         .set_tool_table_location = smu_v11_0_set_tool_table_location,
>         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
>         .system_features_control = smu_v11_0_system_features_control,
> --
> 2.24.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* Re: [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory
  2019-12-31  2:48 [PATCH 1/2] drm/amd/powerplay: cache the watermark settings on system memory Evan Quan
  2019-12-31  2:48 ` [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU Evan Quan
@ 2020-01-01 14:20 ` Alex Deucher
  1 sibling, 0 replies; 5+ messages in thread
From: Alex Deucher @ 2020-01-01 14:20 UTC (permalink / raw)
  To: Evan Quan; +Cc: Deucher, Alexander, amd-gfx list

On Mon, Dec 30, 2019 at 9:49 PM Evan Quan <evan.quan@amd.com> wrote:
>
> So that we do not need to allocate a piece of VRAM for it. This
> is preparation for a coming change which unifies the VRAM address
> used for all driver table interactions with the SMU.
>
> Change-Id: I967f960a10a19957ea7301aa40a8ced0c036ad68
> Signed-off-by: Evan Quan <evan.quan@amd.com>

Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 21 +++++++++----------
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  1 +
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  4 ++++
>  drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  4 ++++
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c     |  2 ++
>  drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  4 ++++
>  6 files changed, 25 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index f9bf69cd72a5..09e16183a769 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1899,26 +1899,25 @@ int smu_set_df_cstate(struct smu_context *smu,
>
>  int smu_write_watermarks_table(struct smu_context *smu)
>  {
> -       int ret = 0;
> -       struct smu_table_context *smu_table = &smu->smu_table;
> -       struct smu_table *table = NULL;
> -
> -       table = &smu_table->tables[SMU_TABLE_WATERMARKS];
> +       void *watermarks_table = smu->smu_table.watermarks_table;
>
> -       if (!table->cpu_addr)
> +       if (!watermarks_table)
>                 return -EINVAL;
>
> -       ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
> +       return smu_update_table(smu,
> +                               SMU_TABLE_WATERMARKS,
> +                               0,
> +                               watermarks_table,
>                                 true);
> -
> -       return ret;
>  }
>
>  int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
>                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
>  {
> -       struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
> -       void *table = watermarks->cpu_addr;
> +       void *table = smu->smu_table.watermarks_table;
> +
> +       if (!table)
> +               return -EINVAL;
>
>         mutex_lock(&smu->mutex);
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 541cfde289ea..30da8328d1bc 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -254,6 +254,7 @@ struct smu_table_context
>         unsigned long                   metrics_time;
>         void                            *metrics_table;
>         void                            *clocks_table;
> +       void                            *watermarks_table;
>
>         void                            *max_sustainable_clocks;
>         struct smu_bios_boot_up_values  boot_values;
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index e7ab8caee222..3387f3243a01 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -555,6 +555,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
>                 return -ENOMEM;
>         smu_table->metrics_time = 0;
>
> +       smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
> +       if (!smu_table->watermarks_table)
> +               return -ENOMEM;
> +
>         return 0;
>  }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index e73644beffd9..506cc6bf4bc0 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -209,6 +209,10 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
>                 return -ENOMEM;
>         smu_table->metrics_time = 0;
>
> +       smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
> +       if (!smu_table->watermarks_table)
> +               return -ENOMEM;
> +
>         return 0;
>  }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index a85826471d82..6fb93eb6ab39 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
>
>         kfree(smu_table->tables);
>         kfree(smu_table->metrics_table);
> +       kfree(smu_table->watermarks_table);
>         smu_table->tables = NULL;
>         smu_table->metrics_table = NULL;
> +       smu_table->watermarks_table = NULL;
>         smu_table->metrics_time = 0;
>
>         ret = smu_v11_0_fini_dpm_context(smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> index 534c46bc0146..27bdcdeb08d9 100644
> --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> @@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
>                 return -ENOMEM;
>         smu_table->metrics_time = 0;
>
> +       smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
> +       if (!smu_table->watermarks_table)
> +               return -ENOMEM;
> +
>         return 0;
>  }
>
> --
> 2.24.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* RE: [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU
  2020-01-01 14:19   ` Alex Deucher
@ 2020-01-02  2:22     ` Quan, Evan
  0 siblings, 0 replies; 5+ messages in thread
From: Quan, Evan @ 2020-01-02  2:22 UTC (permalink / raw)
  To: Alex Deucher; +Cc: Deucher, Alexander, amd-gfx list



> -----Original Message-----
> From: Alex Deucher <alexdeucher@gmail.com>
> Sent: Wednesday, January 1, 2020 10:19 PM
> To: Quan, Evan <Evan.Quan@amd.com>
> Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>; Deucher, Alexander
> <Alexander.Deucher@amd.com>
> Subject: Re: [PATCH 2/2] drm/amd/powerplay: unified VRAM address for driver
> table interaction with SMU
> 
> On Mon, Dec 30, 2019 at 9:49 PM Evan Quan <evan.quan@amd.com> wrote:
> >
> > With this, we can avoid passing in the VRAM address on every table
> > transfer. That puts extra, unnecessary traffic on the SMU in some
> > cases (e.g. polling the amdgpu_pm_info sysfs interface).
> >
> > Change-Id: Ifb74d9cd89790b301e88d472b29cdb9b0365b65a
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
> > ---
> >  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 98 ++++++++++++-------
> >  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |  3 +-
> >  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |  2 +
> >  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 +
> >  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 +
> >  drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  1 +
> >  drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  1 +
> >  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 +
> >  drivers/gpu/drm/amd/powerplay/smu_v11_0.c     | 18 ++++
> >  drivers/gpu/drm/amd/powerplay/smu_v12_0.c     | 26 +++--
> >  drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  1 +
> >  11 files changed, 109 insertions(+), 47 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index 09e16183a769..290976f5f6c2 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -490,26 +490,19 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
> >  {
> >         struct smu_table_context *smu_table = &smu->smu_table;
> >         struct amdgpu_device *adev = smu->adev;
> > -       struct smu_table *table = NULL;
> > -       int ret = 0;
> > +       struct smu_table *table = &smu_table->driver_table;
> >         int table_id = smu_table_get_index(smu, table_index);
> > +       uint32_t table_size;
> > +       int ret = 0;
> >
> >         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
> >                 return -EINVAL;
> >
> > -       table = &smu_table->tables[table_index];
> > +       table_size = smu_table->tables[table_index].size;
> >
> >         if (drv2smu)
> > -               memcpy(table->cpu_addr, table_data, table->size);
> > +               memcpy(table->cpu_addr, table_data, table_size);
> >
> > -       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
> > -                                         upper_32_bits(table->mc_address));
> > -       if (ret)
> > -               return ret;
> > -       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
> > -                                         lower_32_bits(table->mc_address));
> > -       if (ret)
> > -               return ret;
> >         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
> >                                           SMU_MSG_TransferTableDram2Smu :
> >                                           SMU_MSG_TransferTableSmu2Dram,
> > @@ -521,7 +514,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
> >         adev->nbio.funcs->hdp_flush(adev, NULL);
> >
> >         if (!drv2smu)
> > -               memcpy(table_data, table->cpu_addr, table->size);
> > +               memcpy(table_data, table->cpu_addr, table_size);
> >
> >         return ret;
> >  }
> > @@ -948,32 +941,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
> >         struct amdgpu_device *adev = smu->adev;
> >         struct smu_table_context *smu_table = &smu->smu_table;
> >         struct smu_table *tables = smu_table->tables;
> > +       struct smu_table *driver_table = &(smu_table->driver_table);
> > +       uint32_t max_table_size = 0;
> >         int ret, i;
> >
> > -       for (i = 0; i < SMU_TABLE_COUNT; i++) {
> > -               if (tables[i].size == 0)
> > -                       continue;
> > +       /* VRAM allocation for tool table */
> > +       if (tables[SMU_TABLE_PMSTATUSLOG].size) {
> >                 ret = amdgpu_bo_create_kernel(adev,
> > -                                             tables[i].size,
> > -                                             tables[i].align,
> > -                                             tables[i].domain,
> > -                                             &tables[i].bo,
> > -                                             &tables[i].mc_address,
> > -                                             &tables[i].cpu_addr);
> > -               if (ret)
> > -                       goto failed;
> > +                                             tables[SMU_TABLE_PMSTATUSLOG].size,
> > +                                             tables[SMU_TABLE_PMSTATUSLOG].align,
> > +                                             tables[SMU_TABLE_PMSTATUSLOG].domain,
> > +                                             &tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> > +               if (ret) {
> > +                       pr_err("VRAM allocation for tool table failed!\n");
> > +                       return ret;
> > +               }
> >         }
> >
> > -       return 0;
> > -failed:
> > -       while (--i >= 0) {
> > +       /* VRAM allocation for driver table */
> > +       for (i = 0; i < SMU_TABLE_COUNT; i++) {
> >                 if (tables[i].size == 0)
> >                         continue;
> > -               amdgpu_bo_free_kernel(&tables[i].bo,
> > -                                     &tables[i].mc_address,
> > -                                     &tables[i].cpu_addr);
> >
> > +               if (i == SMU_TABLE_PMSTATUSLOG)
> > +                       continue;
> > +
> > +               if (max_table_size < tables[i].size)
> > +                       max_table_size = tables[i].size;
> > +       }
> 
> It would probably be good to document somewhere that the table bo
> allocation is just a staging buffer for uploading tables to and
> downloading tables from the SMU.  The SMU actually stores the active
> tables in SRAM.  That confused me at first when reviewing this patch.
> 
> > +
> > +       driver_table->size = max_table_size;
> > +       driver_table->align = PAGE_SIZE;
> > +       driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
> > +
> > +       ret = amdgpu_bo_create_kernel(adev,
> > +                                     driver_table->size,
> > +                                     driver_table->align,
> > +                                     driver_table->domain,
> > +                                     &driver_table->bo,
> > +                                     &driver_table->mc_address,
> > +                                     &driver_table->cpu_addr);
> > +       if (ret) {
> > +               pr_err("VRAM allocation for driver table failed!\n");
> > +               if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> > +                       amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> >         }
> > +
> >         return ret;
> >  }
> >
> > @@ -981,18 +998,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
> >  {
> >         struct smu_table_context *smu_table = &smu->smu_table;
> >         struct smu_table *tables = smu_table->tables;
> > -       uint32_t i = 0;
> > +       struct smu_table *driver_table = &(smu_table->driver_table);
> >
> >         if (!tables)
> >                 return 0;
> >
> > -       for (i = 0; i < SMU_TABLE_COUNT; i++) {
> > -               if (tables[i].size == 0)
> > -                       continue;
> > -               amdgpu_bo_free_kernel(&tables[i].bo,
> > -                                     &tables[i].mc_address,
> > -                                     &tables[i].cpu_addr);
> > -       }
> > +       if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> > +               amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> > +                                     &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> > +                                     &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> > +
> > +       amdgpu_bo_free_kernel(&driver_table->bo,
> > +                             &driver_table->mc_address,
> > +                             &driver_table->cpu_addr);
> >
> >         return 0;
> >  }
> > @@ -1063,6 +1081,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
> >
> >         /* smu_dump_pptable(smu); */
> >
> > +       ret = smu_set_driver_table_location(smu);
> > +       if (ret)
> > +               return ret;
> > +
> >         /*
> >          * Copy pptable bo in the vram to smc with SMU MSGs such as
> >          * SetDriverDramAddr and TransferTableDram2Smu.
> > diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > index 50b317f4b1e6..064b5201a8a7 100644
> > --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > @@ -2022,7 +2022,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
> >         SwI2cRequest_t req;
> >         struct amdgpu_device *adev = to_amdgpu_device(control);
> >         struct smu_table_context *smu_table = &adev->smu.smu_table;
> > -       struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
> > +       struct smu_table *table = &smu_table->driver_table;
> >
> >         memset(&req, 0, sizeof(req));
> >         arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
> > @@ -2261,6 +2261,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> >         .check_fw_version = smu_v11_0_check_fw_version,
> >         .write_pptable = smu_v11_0_write_pptable,
> >         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> > +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
> >         .set_tool_table_location = smu_v11_0_set_tool_table_location,
> >         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
> >         .system_features_control = smu_v11_0_system_features_control,
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > index 30da8328d1bc..121bf81eced5 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > @@ -260,6 +260,7 @@ struct smu_table_context
> >         struct smu_bios_boot_up_values  boot_values;
> >         void                            *driver_pptable;
> >         struct smu_table                *tables;
> > +       struct smu_table                driver_table;
> 
> This would be a good place to document that the driver table is just a
> staging buffer for uploading/downloading content from the SMU.  Can
> you check that we properly lock the staging buffer in all the call
> paths that interact with SMU?  If we do that, I think we can clean up
> and simplify a lot of the other locking we have already.
[Quan, Evan] Will document that in V2. This table is used to transfer the
pptable, watermarks table, metrics table and activity monitor table to/from
the SMU. And as I confirmed, all of those call paths already have proper
lock protection. So there is no need to add new locking around this.
> 
> >         struct smu_table                memory_pool;
> >         uint8_t                         thermal_controller_type;
> >
> > @@ -498,6 +499,7 @@ struct pptable_funcs {
> >         int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
> >         int (*write_pptable)(struct smu_context *smu);
> >         int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
> > +       int (*set_driver_table_location)(struct smu_context *smu);
> >         int (*set_tool_table_location)(struct smu_context *smu);
> >         int (*notify_memory_pool_location)(struct smu_context *smu);
> >         int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> > index db3f78676aeb..662989296174 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> > @@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
> >
> >  int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
> >
> > +int smu_v11_0_set_driver_table_location(struct smu_context *smu);
> > +
> >  int smu_v11_0_set_tool_table_location(struct smu_context *smu);
> >
> >  int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
> > diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> > index 3f1cd06e273c..d79e54b5ebf6 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> > +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> > @@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
> >  int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
> >                             uint32_t min, uint32_t max);
> >
> > +int smu_v12_0_set_driver_table_location(struct smu_context *smu);
> > +
> >  #endif
> > diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > index 3387f3243a01..aa206bde619b 100644
> > --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> > @@ -2120,6 +2120,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
> >         .check_fw_version = smu_v11_0_check_fw_version,
> >         .write_pptable = smu_v11_0_write_pptable,
> >         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> > +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
> >         .set_tool_table_location = smu_v11_0_set_tool_table_location,
> >         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
> >         .system_features_control = smu_v11_0_system_features_control,
> > diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > index 506cc6bf4bc0..861e6410363b 100644
> > --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> > @@ -920,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
> >         .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
> >         .mode2_reset = smu_v12_0_mode2_reset,
> >         .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
> > +       .set_driver_table_location = smu_v12_0_set_driver_table_location,
> >  };
> >
> >  void renoir_set_ppt_funcs(struct smu_context *smu)
> > diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> > index 77864e4236c4..783319ec8bf9 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> > @@ -61,6 +61,8 @@
> >         ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
> >  #define smu_set_min_dcef_deep_sleep(smu) \
> >         ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
> > +#define smu_set_driver_table_location(smu) \
> > +       ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
> >  #define smu_set_tool_table_location(smu) \
> >         ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
> >  #define smu_notify_memory_pool_location(smu) \
> > diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > index 6fb93eb6ab39..e804f9854027 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> > @@ -776,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
> >         return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
> >  }
> >
> > +int smu_v11_0_set_driver_table_location(struct smu_context *smu)
> > +{
> > +       struct smu_table *driver_table = &smu->smu_table.driver_table;
> > +       int ret = 0;
> > +
> > +       if (driver_table->mc_address) {
> > +               ret = smu_send_smc_msg_with_param(smu,
> > +                               SMU_MSG_SetDriverDramAddrHigh,
> > +                               upper_32_bits(driver_table->mc_address));
> > +               if (!ret)
> > +                       ret = smu_send_smc_msg_with_param(smu,
> > +                               SMU_MSG_SetDriverDramAddrLow,
> > +                               lower_32_bits(driver_table->mc_address));
> > +       }
> > +
> > +       return ret;
> > +}
> > +
> >  int smu_v11_0_set_tool_table_location(struct smu_context *smu)
> >  {
> >         int ret = 0;
> > diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > index 2ac7f2f231b6..2b1ef9eb0db6 100644
> > --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> > @@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
> >  int smu_v12_0_populate_smc_tables(struct smu_context *smu)
> >  {
> >         struct smu_table_context *smu_table = &smu->smu_table;
> > -       struct smu_table *table = NULL;
> > -
> > -       table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
> > -       if (!table)
> > -               return -EINVAL;
> > -
> > -       if (!table->cpu_addr)
> > -               return -EINVAL;
> >
> >         return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
> >  }
> > @@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
> >
> >         return ret;
> >  }
> > +
> > +int smu_v12_0_set_driver_table_location(struct smu_context *smu)
> > +{
> > +       struct smu_table *driver_table = &smu->smu_table.driver_table;
> > +       int ret = 0;
> > +
> > +       if (driver_table->mc_address) {
> > +               ret = smu_send_smc_msg_with_param(smu,
> > +                               SMU_MSG_SetDriverDramAddrHigh,
> > +                               upper_32_bits(driver_table->mc_address));
> > +               if (!ret)
> > +                       ret = smu_send_smc_msg_with_param(smu,
> > +                               SMU_MSG_SetDriverDramAddrLow,
> > +                               lower_32_bits(driver_table->mc_address));
> > +       }
> > +
> > +       return ret;
> > +}
> > diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > index 27bdcdeb08d9..38febd5ca4da 100644
> > --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> > @@ -3236,6 +3236,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
> >         .check_fw_version = smu_v11_0_check_fw_version,
> >         .write_pptable = smu_v11_0_write_pptable,
> >         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> > +       .set_driver_table_location = smu_v11_0_set_driver_table_location,
> >         .set_tool_table_location = smu_v11_0_set_tool_table_location,
> >         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
> >         .system_features_control = smu_v11_0_system_features_control,
> > --
> > 2.24.0
> >
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
