All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
@ 2019-12-18 19:04 Jonathan Kim
  2019-12-18 21:56 ` Felix Kuehling
  0 siblings, 1 reply; 5+ messages in thread
From: Jonathan Kim @ 2019-12-18 19:04 UTC (permalink / raw)
  To: amd-gfx; +Cc: Felix.Kuehling, Jonathan Kim

The DF routines that arm xGMI performance monitoring will attempt to
re-arm on both performance monitoring start and read upon initial
failure to arm.

v2: Roll back reset_perfmon_cntr to void return since new perfmon
counters are now safe to write to during DF C-States.  Do single perfmon
controller re-arm when counter is deferred in pmc_count versus multiple
perfmon controller re-arms that could last 1 millisecond. Avoid holding
spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm
defer function to be less confusing.

Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 149 +++++++++++++++++++++++----
 1 file changed, 127 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..56dead3a9718 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+/* same as perfmon arm but return status on write value check */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+
+/*
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */
 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 		struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 
 	}
@@ -422,6 +477,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	return (adev->df_perfmon_config_assign_mask[target_cntr]
+							& DEFERRED_ARM_MASK);
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
 				     uint64_t config)
@@ -451,29 +542,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 			     int is_enable)
 {
 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
 
 		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
-			ret = df_v3_6_pmc_get_ctrl_settings(adev,
+		ret = df_v3_6_pmc_get_ctrl_settings(adev,
 					config,
 					&lo_base_addr,
 					&hi_base_addr,
 					&lo_val,
 					&hi_val);
 
-			if (ret)
-				return ret;
+		if (ret)
+			return ret;
 
-			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-					hi_base_addr, hi_val);
-		}
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
+
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, 1);
 
 		break;
 	default:
@@ -501,7 +596,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
 		if (is_disable)
 			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +613,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-
 		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
 				      &hi_base_addr);
 
 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
 			return;
 
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (!rearm_err)
+				df_v3_6_pmc_set_deferred(adev, config, 0);
+			else
+				return;
+		}
+
 		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
 				hi_base_addr, &hi_val);
 
@@ -542,7 +648,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
 		break;
-
 	default:
 		break;
 	}
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
  2019-12-18 19:04 [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm Jonathan Kim
@ 2019-12-18 21:56 ` Felix Kuehling
  0 siblings, 0 replies; 5+ messages in thread
From: Felix Kuehling @ 2019-12-18 21:56 UTC (permalink / raw)
  To: Jonathan Kim, amd-gfx

Some nit-picks inline. Looks good otherwise.

On 2019-12-18 2:04 p.m., Jonathan Kim wrote:
> The DF routines that arm xGMI performance monitoring will attempt to
> re-arm on both performance monitoring start and read upon initial
> failure to arm.
>
> v2: Roll back reset_perfmon_cntr to void return since new perfmon
> counters are now safe to write to during DF C-States.  Do single perfmon
> controller re-arm when counter is deferred in pmc_count versus multiple
> perfmon controller re-arms that could last 1 millisecond. Avoid holding
> spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm
> defer function to be less confusing.
>
> Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 149 +++++++++++++++++++++++----
>   1 file changed, 127 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index 4043ebcea5de..56dead3a9718 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
>   	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>   }
>   
> +/* same as perfmon arm but return status on write value check */
> +static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
> +					  uint32_t lo_addr, uint32_t lo_val,
> +					  uint32_t hi_addr, uint32_t  hi_val)
> +{
> +	unsigned long flags, address, data;
> +	uint32_t lo_val_rb, hi_val_rb;
> +
> +	address = adev->nbio.funcs->get_pcie_index_offset(adev);
> +	data = adev->nbio.funcs->get_pcie_data_offset(adev);
> +
> +	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> +	WREG32(address, lo_addr);
> +	WREG32(data, lo_val);
> +	WREG32(address, hi_addr);
> +	WREG32(data, hi_val);
> +
> +	WREG32(address, lo_addr);
> +	lo_val_rb = RREG32(data);
> +	WREG32(address, hi_addr);
> +	hi_val_rb = RREG32(data);
> +	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
> +
> +	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
> +		return -EBUSY;
> +
> +	return 0;
> +}
> +
> +
> +/*
> + * retry arming counters every 100 usecs within 1 millisecond interval.
> + * if retry fails after time out, return error.
> + */
> +#define ARM_RETRY_USEC_TIMEOUT	1000
> +#define ARM_RETRY_USEC_INTERVAL	100
> +static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
> +					  uint32_t lo_addr, uint32_t lo_val,
> +					  uint32_t hi_addr, uint32_t  hi_val)
> +{
> +	int countdown = ARM_RETRY_USEC_TIMEOUT;
> +
> +	while (countdown) {
> +
> +		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
> +						     hi_addr, hi_val))
> +			break;
> +
> +		countdown -= ARM_RETRY_USEC_INTERVAL;
> +		udelay(ARM_RETRY_USEC_INTERVAL);
> +	}
> +
> +	return countdown > 0 ? 0 : -ETIME;
> +}
> +
>   /* get the number of df counters available */
>   static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
>   		struct device_attribute *attr,
> @@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
>   	switch (target_cntr) {
>   
>   	case 0:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
>   		break;
>   	case 1:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
>   		break;
>   	case 2:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
>   		break;
>   	case 3:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
>   		break;
>   
>   	}
> @@ -422,6 +477,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
>   	return -ENOSPC;
>   }
>   
> +#define DEFERRED_ARM_MASK	(1 << 31)
> +static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
> +				    uint64_t config, bool is_deferred)
> +{
> +	int target_cntr;
> +
> +	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> +	if (target_cntr < 0)
> +		return -EINVAL;
> +
> +	if (is_deferred)
> +		adev->df_perfmon_config_assign_mask[target_cntr] |=
> +							DEFERRED_ARM_MASK;
> +	else
> +		adev->df_perfmon_config_assign_mask[target_cntr] &=
> +							~DEFERRED_ARM_MASK;
> +
> +	return 0;
> +}
> +
> +static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
> +				    uint64_t config)
> +{
> +	int target_cntr;
> +
> +	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> +	if (target_cntr < 0)
> +		return -EINVAL;

This error code won't be returned correctly because the function returns 
bool.


> +
> +	return (adev->df_perfmon_config_assign_mask[target_cntr]
> +							& DEFERRED_ARM_MASK);
> +
> +}
> +
>   /* release performance counter */
>   static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
>   				     uint64_t config)
> @@ -451,29 +542,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
>   			     int is_enable)
>   {
>   	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> -	int ret = 0;
> +	int err = 0, ret = 0;
>   
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA20:
> +		if (is_enable)
> +			return df_v3_6_pmc_add_cntr(adev, config);
>   
>   		df_v3_6_reset_perfmon_cntr(adev, config);
>   
> -		if (is_enable) {
> -			ret = df_v3_6_pmc_add_cntr(adev, config);
> -		} else {
> -			ret = df_v3_6_pmc_get_ctrl_settings(adev,
> +		ret = df_v3_6_pmc_get_ctrl_settings(adev,
>   					config,
>   					&lo_base_addr,
>   					&hi_base_addr,
>   					&lo_val,
>   					&hi_val);
>   
> -			if (ret)
> -				return ret;
> +		if (ret)
> +			return ret;
>   
> -			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
> -					hi_base_addr, hi_val);
> -		}
> +		err = df_v3_6_perfmon_arm_with_retry(adev,
> +						     lo_base_addr,
> +						     lo_val,
> +						     hi_base_addr,
> +						     hi_val);
> +
> +		if (err)
> +			ret = df_v3_6_pmc_set_deferred(adev, config, 1);

Pass "true" for the boolean parameter.


>   
>   		break;
>   	default:
> @@ -501,7 +596,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
>   		if (ret)
>   			return ret;
>   
> -		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
> +		df_v3_6_reset_perfmon_cntr(adev, config);
>   
>   		if (is_disable)
>   			df_v3_6_pmc_release_cntr(adev, config);
> @@ -518,18 +613,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
>   				  uint64_t config,
>   				  uint64_t *count)
>   {
> -	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> +	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
>   	*count = 0;
>   
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA20:
> -
>   		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
>   				      &hi_base_addr);
>   
>   		if ((lo_base_addr == 0) || (hi_base_addr == 0))
>   			return;
>   
> +		/* rearm the counter or throw away count value on failure */
> +		if (df_v3_6_pmc_is_deferred(adev, config)) {
> +			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
> +							lo_base_addr, lo_val,
> +							hi_base_addr, hi_val);
> +
> +			if (!rearm_err)
> +				df_v3_6_pmc_set_deferred(adev, config, 0);

Pass "false" for the boolean parameter.

Also, I find the logic here a bit convoluted. This would be clearer IMO:

     if (rearm_err)
         return;

     df_v3_6_pmc_set_deferred(adev, config, false);
     ...


Regards,
   Felix


> +			else
> +				return;
> +		}
> +
>   		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
>   				hi_base_addr, &hi_val);
>   
> @@ -542,7 +648,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
>   			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
>   
>   		break;
> -
>   	default:
>   		break;
>   	}
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
  2019-12-19 21:30 Jonathan Kim
  2019-12-19 21:31 ` Kim, Jonathan
@ 2019-12-19 22:03 ` Felix Kuehling
  1 sibling, 0 replies; 5+ messages in thread
From: Felix Kuehling @ 2019-12-19 22:03 UTC (permalink / raw)
  To: Jonathan Kim, amd-gfx

On 2019-12-19 4:30 p.m., Jonathan Kim wrote:
> The DF routines that arm xGMI performance monitoring will attempt to
> re-arm on both performance monitoring start and read upon initial
> failure to arm.
>
> v3: Addressing nit-picks.
>
> v2: Roll back reset_perfmon_cntr to void return since new perfmon
> counters are now safe to write to during DF C-States.  Do single perfmon
> controller re-arm when counter is deferred in pmc_count versus multiple
> perfmon controller re-arms that could last 1 millisecond. Avoid holding
> spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm
> defer function to be less confusing.
>
> Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>


> ---
>   drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 151 +++++++++++++++++++++++----
>   1 file changed, 129 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index 4043ebcea5de..2f884d941e8d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
>   	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>   }
>   
> +/* same as perfmon_wreg but return status on write value check */
> +static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
> +					  uint32_t lo_addr, uint32_t lo_val,
> +					  uint32_t hi_addr, uint32_t  hi_val)
> +{
> +	unsigned long flags, address, data;
> +	uint32_t lo_val_rb, hi_val_rb;
> +
> +	address = adev->nbio.funcs->get_pcie_index_offset(adev);
> +	data = adev->nbio.funcs->get_pcie_data_offset(adev);
> +
> +	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> +	WREG32(address, lo_addr);
> +	WREG32(data, lo_val);
> +	WREG32(address, hi_addr);
> +	WREG32(data, hi_val);
> +
> +	WREG32(address, lo_addr);
> +	lo_val_rb = RREG32(data);
> +	WREG32(address, hi_addr);
> +	hi_val_rb = RREG32(data);
> +	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
> +
> +	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
> +		return -EBUSY;
> +
> +	return 0;
> +}
> +
> +
> +/*
> + * retry arming counters every 100 usecs within 1 millisecond interval.
> + * if retry fails after time out, return error.
> + */
> +#define ARM_RETRY_USEC_TIMEOUT	1000
> +#define ARM_RETRY_USEC_INTERVAL	100
> +static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
> +					  uint32_t lo_addr, uint32_t lo_val,
> +					  uint32_t hi_addr, uint32_t  hi_val)
> +{
> +	int countdown = ARM_RETRY_USEC_TIMEOUT;
> +
> +	while (countdown) {
> +
> +		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
> +						     hi_addr, hi_val))
> +			break;
> +
> +		countdown -= ARM_RETRY_USEC_INTERVAL;
> +		udelay(ARM_RETRY_USEC_INTERVAL);
> +	}
> +
> +	return countdown > 0 ? 0 : -ETIME;
> +}
> +
>   /* get the number of df counters available */
>   static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
>   		struct device_attribute *attr,
> @@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
>   	switch (target_cntr) {
>   
>   	case 0:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
>   		break;
>   	case 1:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
>   		break;
>   	case 2:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
>   		break;
>   	case 3:
> -		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
> -		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
> +		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
> +		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
>   		break;
>   
>   	}
> @@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
>   	return -ENOSPC;
>   }
>   
> +#define DEFERRED_ARM_MASK	(1 << 31)
> +static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
> +				    uint64_t config, bool is_deferred)
> +{
> +	int target_cntr;
> +
> +	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> +	if (target_cntr < 0)
> +		return -EINVAL;
> +
> +	if (is_deferred)
> +		adev->df_perfmon_config_assign_mask[target_cntr] |=
> +							DEFERRED_ARM_MASK;
> +	else
> +		adev->df_perfmon_config_assign_mask[target_cntr] &=
> +							~DEFERRED_ARM_MASK;
> +
> +	return 0;
> +}
> +
> +static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
> +				    uint64_t config)
> +{
> +	int target_cntr;
> +
> +	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> +	/*
> +	 * we never get target_cntr < 0 since this function is only called in
> +	 * pmc_count for now but we should check anyways.
> +	 */
> +	return (target_cntr >= 0 &&
> +			(adev->df_perfmon_config_assign_mask[target_cntr]
> +			& DEFERRED_ARM_MASK));
> +
> +}
> +
>   /* release performance counter */
>   static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
>   				     uint64_t config)
> @@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
>   			     int is_enable)
>   {
>   	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> -	int ret = 0;
> +	int err = 0, ret = 0;
>   
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA20:
> +		if (is_enable)
> +			return df_v3_6_pmc_add_cntr(adev, config);
>   
>   		df_v3_6_reset_perfmon_cntr(adev, config);
>   
> -		if (is_enable) {
> -			ret = df_v3_6_pmc_add_cntr(adev, config);
> -		} else {
> -			ret = df_v3_6_pmc_get_ctrl_settings(adev,
> +		ret = df_v3_6_pmc_get_ctrl_settings(adev,
>   					config,
>   					&lo_base_addr,
>   					&hi_base_addr,
>   					&lo_val,
>   					&hi_val);
>   
> -			if (ret)
> -				return ret;
> +		if (ret)
> +			return ret;
> +
> +		err = df_v3_6_perfmon_arm_with_retry(adev,
> +						     lo_base_addr,
> +						     lo_val,
> +						     hi_base_addr,
> +						     hi_val);
>   
> -			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
> -					hi_base_addr, hi_val);
> -		}
> +		if (err)
> +			ret = df_v3_6_pmc_set_deferred(adev, config, true);
>   
>   		break;
>   	default:
> @@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
>   		if (ret)
>   			return ret;
>   
> -		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
> +		df_v3_6_reset_perfmon_cntr(adev, config);
>   
>   		if (is_disable)
>   			df_v3_6_pmc_release_cntr(adev, config);
> @@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
>   				  uint64_t config,
>   				  uint64_t *count)
>   {
> -	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> +	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
>   	*count = 0;
>   
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA20:
> -
>   		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
>   				      &hi_base_addr);
>   
>   		if ((lo_base_addr == 0) || (hi_base_addr == 0))
>   			return;
>   
> +		/* rearm the counter or throw away count value on failure */
> +		if (df_v3_6_pmc_is_deferred(adev, config)) {
> +			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
> +							lo_base_addr, lo_val,
> +							hi_base_addr, hi_val);
> +
> +			if (rearm_err)
> +				return;
> +
> +			df_v3_6_pmc_set_deferred(adev, config, false);
> +		}
> +
>   		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
>   				hi_base_addr, &hi_val);
>   
> @@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
>   			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
>   
>   		break;
> -
>   	default:
>   		break;
>   	}
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
  2019-12-19 21:30 Jonathan Kim
@ 2019-12-19 21:31 ` Kim, Jonathan
  2019-12-19 22:03 ` Felix Kuehling
  1 sibling, 0 replies; 5+ messages in thread
From: Kim, Jonathan @ 2019-12-19 21:31 UTC (permalink / raw)
  To: amd-gfx; +Cc: Kuehling, Felix

[AMD Official Use Only - Internal Distribution Only]



-----Original Message-----
From: Kim, Jonathan <Jonathan.Kim@amd.com> 
Sent: Thursday, December 19, 2019 4:31 PM
To: amd-gfx@lists.freedesktop.org
Cc: Felix.Kuehling@amd.com; Kim, Jonathan <Jonathan.Kim@amd.com>; Kim, Jonathan <Jonathan.Kim@amd.com>
Subject: [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm

The DF routines that arm xGMI performance monitoring will attempt to re-arm on both performance monitoring start and read upon initial failure to arm.

v3: Addressing nit-picks.

v2: Roll back reset_perfmon_cntr to void return since new perfmon counters are now safe to write to during DF C-States.  Do single perfmon controller re-arm when counter is deferred in pmc_count versus multiple perfmon controller re-arms that could last 1 millisecond. Avoid holding spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm defer function to be less confusing.

Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 151 +++++++++++++++++++++++----
 1 file changed, 129 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..2f884d941e8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);  }
 
+/* same as perfmon_wreg but return status on write value check */ 
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val) {
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+
+/*
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val) {
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */  static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 		struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 
 	}
@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred) {
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	/*
+	 * we never get target_cntr < 0 since this function is only called in
+	 * pmc_count for now but we should check anyways.
+	 */
+	return (target_cntr >= 0 &&
+			(adev->df_perfmon_config_assign_mask[target_cntr]
+			& DEFERRED_ARM_MASK));
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
 				     uint64_t config)
@@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 			     int is_enable)
 {
 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
 
 		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
-			ret = df_v3_6_pmc_get_ctrl_settings(adev,
+		ret = df_v3_6_pmc_get_ctrl_settings(adev,
 					config,
 					&lo_base_addr,
 					&hi_base_addr,
 					&lo_val,
 					&hi_val);
 
-			if (ret)
-				return ret;
+		if (ret)
+			return ret;
+
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
 
-			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-					hi_base_addr, hi_val);
-		}
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, true);
 
 		break;
 	default:
@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
 		if (is_disable)
 			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-
 		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
 				      &hi_base_addr);
 
 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
 			return;
 
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (rearm_err)
+				return;
+
+			df_v3_6_pmc_set_deferred(adev, config, false);
+		}
+
 		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
 				hi_base_addr, &hi_val);
 
@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
 		break;
-
 	default:
 		break;
 	}
--
2.17.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
@ 2019-12-19 21:30 Jonathan Kim
  2019-12-19 21:31 ` Kim, Jonathan
  2019-12-19 22:03 ` Felix Kuehling
  0 siblings, 2 replies; 5+ messages in thread
From: Jonathan Kim @ 2019-12-19 21:30 UTC (permalink / raw)
  To: amd-gfx; +Cc: Jonathan Kim, Felix.Kuehling

The DF routines to arm xGMI performance will attempt to re-arm both on
performance monitoring start and read on initial failure to arm.

v3: Addressing nit-picks.

v2: Roll back reset_perfmon_cntr to void return since new perfmon
counters are now safe to write to during DF C-States.  Do single perfmon
controller re-arm when counter is deferred in pmc_count versus multiple
perfmon controller re-arms that could last 1 millisecond. Avoid holding
spin lock during sleep in perfmon_arm_with_retry.  Rename counter arm
defer function to be less confusing.

Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 151 +++++++++++++++++++++++----
 1 file changed, 129 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..2f884d941e8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+/* same as perfmon_wreg but return status on write value check */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+
+/*
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					  uint32_t lo_addr, uint32_t lo_val,
+					  uint32_t hi_addr, uint32_t  hi_val)
+{
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */
 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 		struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 
 	}
@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	/*
+	 * we never get target_cntr < 0 since this function is only called in
+	 * pmc_count for now but we should check anyways.
+	 */
+	return (target_cntr >= 0 &&
+			(adev->df_perfmon_config_assign_mask[target_cntr]
+			& DEFERRED_ARM_MASK));
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
 				     uint64_t config)
@@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 			     int is_enable)
 {
 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
 
 		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
-			ret = df_v3_6_pmc_get_ctrl_settings(adev,
+		ret = df_v3_6_pmc_get_ctrl_settings(adev,
 					config,
 					&lo_base_addr,
 					&hi_base_addr,
 					&lo_val,
 					&hi_val);
 
-			if (ret)
-				return ret;
+		if (ret)
+			return ret;
+
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
 
-			df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-					hi_base_addr, hi_val);
-		}
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, true);
 
 		break;
 	default:
@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
 		if (is_disable)
 			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-
 		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
 				      &hi_base_addr);
 
 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
 			return;
 
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (rearm_err)
+				return;
+
+			df_v3_6_pmc_set_deferred(adev, config, false);
+		}
+
 		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
 				hi_base_addr, &hi_val);
 
@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
 		break;
-
 	default:
 		break;
 	}
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2019-12-19 22:03 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-12-18 19:04 [PATCH] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm Jonathan Kim
2019-12-18 21:56 ` Felix Kuehling
2019-12-19 21:30 Jonathan Kim
2019-12-19 21:31 ` Kim, Jonathan
2019-12-19 22:03 ` Felix Kuehling

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.