* [PATCH] drm/amd/powerplay: issue proper hdp flush for table transferring
From: Evan Quan @ 2020-01-03  9:47 UTC
  To: amd-gfx; +Cc: Evan Quan

Keep the GPU's and the CPU's views of the table content consistent
during table transfers.

Previously a single HDP flush was issued after the transfer message had
already been sent. That is too late for driver-to-SMU transfers: the
flush must happen after the CPU writes the table buffer and before the
SMU reads it. For SMU-to-driver transfers, the flush must instead
happen after the SMU writes the buffer and before the CPU reads it.
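
For reference, the net effect on smu_update_table(), sketched in one
place (msg_arg stands in for the real message argument, which the hunks
below elide; error handling is likewise trimmed):

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/* CPU wrote the buffer: flush hdp before the SMU reads it */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  msg_arg);

	if (!drv2smu) {
		/* SMU wrote the buffer: flush hdp before the CPU reads it */
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}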

Change-Id: Ib3cebb97a1c8fb302eb040483bbaf089ae00c6a9
Signed-off-by: Evan Quan <evan.quan@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c        | 15 ++++++++++-----
 .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c   |  5 ++++-
 .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c  |  5 ++++-
 .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c  |  5 ++++-
 .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c  | 10 ++++++++--
 5 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index a56ebcc4e3c7..e1b64134bbd8 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -529,8 +529,14 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 
 	table_size = smu_table->tables[table_index].size;
 
-	if (drv2smu)
+	if (drv2smu) {
 		memcpy(table->cpu_addr, table_data, table_size);
+		/*
+		 * Flush the hdp cache to ensure the content seen
+		 * by the GPU is consistent with the CPU's copy.
+		 */
+		amdgpu_asic_flush_hdp(adev, NULL);
+	}
 
 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 					  SMU_MSG_TransferTableDram2Smu :
@@ -539,11 +545,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	if (ret)
 		return ret;
 
-	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
-
-	if (!drv2smu)
+	if (!drv2smu) {
+		amdgpu_asic_flush_hdp(adev, NULL);
 		memcpy(table_data, table->cpu_addr, table_size);
+	}
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index aa0ee2b46135..2319400a3fcb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct smu10_smumgr *priv =
 			(struct smu10_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
 			"Invalid SMU Table ID!", return -EINVAL;);
@@ -161,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	smu10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 39427ca32a15..715564009089 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -70,6 +70,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 		uint8_t *table, int16_t table_id)
 {
 	struct vega10_smumgr *priv = hwmgr->smu_backend;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	/* under sriov, vbios or hypervisor driver
 	 * has already copy table to smc so here only skip it
@@ -87,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 90c782c132d2..a3915bfcce81 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			return -EINVAL);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct vega12_smumgr *priv =
 			(struct vega12_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
 			"Invalid SMU Table ID!", return -EINVAL);
@@ -95,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index f604612f411f..0db57fb83d30 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			return ret);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
@@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 	int ret = 0;
 
 	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
@@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 	int ret = 0;
 
 	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
 			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
 
+	amdgpu_asic_flush_hdp(adev, NULL);
+
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
@@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
 			return ret);
 
 	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
+	amdgpu_asic_flush_hdp(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
 			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
-- 
2.24.1

* RE: [PATCH] drm/amd/powerplay: issue proper hdp flush for table transferring
From: Quan, Evan @ 2020-01-06  6:34 UTC
  To: amd-gfx

Ping..

> -----Original Message-----
> From: Quan, Evan <Evan.Quan@amd.com>
> Sent: Friday, January 3, 2020 5:47 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan <Evan.Quan@amd.com>
> Subject: [PATCH] drm/amd/powerplay: issue proper hdp flush for table
> transferring
> 
> [snip]

* Re: [PATCH] drm/amd/powerplay: issue proper hdp flush for table transferring
From: Alex Deucher @ 2020-01-06 23:08 UTC
  To: Quan, Evan; +Cc: amd-gfx

On Mon, Jan 6, 2020 at 1:34 AM Quan, Evan <Evan.Quan@amd.com> wrote:
>
> Ping..
>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

> > -----Original Message-----
> > From: Quan, Evan <Evan.Quan@amd.com>
> > Sent: Friday, January 3, 2020 5:47 PM
> > To: amd-gfx@lists.freedesktop.org
> > Cc: Quan, Evan <Evan.Quan@amd.com>
> > Subject: [PATCH] drm/amd/powerplay: issue proper hdp flush for table
> > transferring
> >
> > [snip]
