* [PATCH 1/2] drm/amdgpu: add perfmons accessible during df c-states
@ 2019-12-17 17:28 Jonathan Kim
2019-12-17 17:28 ` [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm Jonathan Kim
0 siblings, 1 reply; 3+ messages in thread
From: Jonathan Kim @ 2019-12-17 17:28 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Felix.Kuehling, Jonathan Kim
During DF C-State, Perfmon counters outside of range 1D700-1D7FF will
encounter SLVERR affecting xGMI performance monitoring. PerfmonCtr[7:4]
is being added to avoid SLVERR during read since it falls within this
range. PerfmonCtl[7:4] is being added in order to arm PerfmonCtr[7:4].
Since PerfmonCtl[7:4] exists outside of range 1D700-1D7FF, DF routines
will be enabled to opportunistically re-arm PerfmonCtl[7:4] on retry
after SLVERR.
Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
Acked-by: Alex Deucher <Alexander.Deucher@amd.com>
---
.../drm/amd/include/asic_reg/df/df_3_6_offset.h | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index c2bd25589e84..f301e5fe2109 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -38,6 +38,14 @@
#define smnPerfMonCtlHi2 0x01d464UL
#define smnPerfMonCtlLo3 0x01d470UL
#define smnPerfMonCtlHi3 0x01d474UL
+#define smnPerfMonCtlLo4 0x01d880UL
+#define smnPerfMonCtlHi4 0x01d884UL
+#define smnPerfMonCtlLo5 0x01d888UL
+#define smnPerfMonCtlHi5 0x01d88cUL
+#define smnPerfMonCtlLo6 0x01d890UL
+#define smnPerfMonCtlHi6 0x01d894UL
+#define smnPerfMonCtlLo7 0x01d898UL
+#define smnPerfMonCtlHi7 0x01d89cUL
#define smnPerfMonCtrLo0 0x01d448UL
#define smnPerfMonCtrHi0 0x01d44cUL
@@ -47,6 +55,14 @@
#define smnPerfMonCtrHi2 0x01d46cUL
#define smnPerfMonCtrLo3 0x01d478UL
#define smnPerfMonCtrHi3 0x01d47cUL
+#define smnPerfMonCtrLo4 0x01d790UL
+#define smnPerfMonCtrHi4 0x01d794UL
+#define smnPerfMonCtrLo5 0x01d798UL
+#define smnPerfMonCtrHi5 0x01d79cUL
+#define smnPerfMonCtrLo6 0x01d7a0UL
+#define smnPerfMonCtrHi6 0x01d7a4UL
+#define smnPerfMonCtrLo7 0x01d7a8UL
+#define smnPerfMonCtrHi7 0x01d7acUL
#define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL
#define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL
--
2.17.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
2019-12-17 17:28 [PATCH 1/2] drm/amdgpu: add perfmons accessible during df c-states Jonathan Kim
@ 2019-12-17 17:28 ` Jonathan Kim
2019-12-17 22:18 ` Felix Kuehling
0 siblings, 1 reply; 3+ messages in thread
From: Jonathan Kim @ 2019-12-17 17:28 UTC (permalink / raw)
To: amd-gfx; +Cc: Alexander.Deucher, Felix.Kuehling, Jonathan Kim
The DF routines that arm xGMI performance monitoring will attempt to
re-arm both on performance monitoring start and on read after an initial
failure to arm.
Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 153 ++++++++++++++++++++-------
1 file changed, 117 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..af445054305f 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -162,25 +162,45 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
}
/*
- * df_v3_6_perfmon_wreg - write to perfmon lo and hi
- *
- * required to be atomic. no mmio method provided so subsequent reads after
- * data writes cannot occur to preserve data fabrics finite state machine.
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
*/
-static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
- uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
{
unsigned long flags, address, data;
+ uint32_t lo_val_rb, hi_val_rb;
+ int countdown = ARM_RETRY_USEC_TIMEOUT;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, lo_addr);
- WREG32(data, lo_val);
- WREG32(address, hi_addr);
- WREG32(data, hi_val);
+
+ while (countdown) {
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+
+ WREG32(address, lo_addr);
+ lo_val_rb = RREG32(data);
+ WREG32(address, hi_addr);
+ hi_val_rb = RREG32(data);
+
+ if (lo_val == lo_val_rb && hi_val == hi_val_rb)
+ break;
+
+ countdown -= ARM_RETRY_USEC_INTERVAL;
+ udelay(ARM_RETRY_USEC_INTERVAL);
+ }
+
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return countdown > 0 ? 0 : -ETIME;
}
/* get the number of df counters available */
@@ -334,20 +354,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
switch (target_cntr) {
case 0:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
}
@@ -422,6 +442,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC;
}
+#define DEFERRED_ARM_MASK (1 << 31)
+static int df_v3_6_pmc_defer_cntr(struct amdgpu_device *adev,
+ uint64_t config, int err)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ if (target_cntr < 0)
+ return -EINVAL;
+
+ if (err)
+ adev->df_perfmon_config_assign_mask[target_cntr] |=
+ DEFERRED_ARM_MASK;
+ else
+ adev->df_perfmon_config_assign_mask[target_cntr] &=
+ ~DEFERRED_ARM_MASK;
+
+ return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+ uint64_t config)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ if (target_cntr < 0)
+ return -EINVAL;
+
+ return (adev->df_perfmon_config_assign_mask[target_cntr]
+ & DEFERRED_ARM_MASK);
+
+}
+
/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config)
@@ -433,7 +489,7 @@ static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
}
-static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
+static int df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
uint64_t config)
{
uint32_t lo_base_addr, hi_base_addr;
@@ -442,38 +498,54 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
&hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0))
- return;
+ return -EINVAL;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+ return df_v3_6_perfmon_arm_with_retry(adev, lo_base_addr, 0,
+ hi_base_addr, 0);
}
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int is_enable)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- int ret = 0;
+ int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
-
- df_v3_6_reset_perfmon_cntr(adev, config);
-
if (is_enable) {
ret = df_v3_6_pmc_add_cntr(adev, config);
- } else {
- ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ return ret;
+ }
+
+ err = df_v3_6_reset_perfmon_cntr(adev, config);
+
+ ret = df_v3_6_pmc_defer_cntr(adev, config, err);
+
+ if (err || ret)
+ return ret;
+
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
&hi_val);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
- hi_base_addr, hi_val);
- }
+ /*
+ * if arm with retries fail, mark reserved counter high bit to
+ * indicate that event arm has failed. retry will occur
+ * during pmc_count in this case.
+ */
+ err = df_v3_6_perfmon_arm_with_retry(adev,
+ lo_base_addr,
+ lo_val,
+ hi_base_addr,
+ hi_val);
+
+ ret = df_v3_6_pmc_defer_cntr(adev, config, err);
break;
default:
@@ -501,7 +573,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret)
return ret;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+ df_v3_6_reset_perfmon_cntr(adev, config);
if (is_disable)
df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +590,28 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+ int rearm_err = 0;
*count = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
-
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0))
return;
+ /* rearm the counter or throw away count value on failure */
+ if (df_v3_6_pmc_is_deferred(adev, config)) {
+ rearm_err = df_v3_6_perfmon_arm_with_retry(
+ adev, lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
+ }
+
+ if (rearm_err)
+ return;
+
df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
hi_base_addr, &hi_val);
@@ -542,7 +624,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
config, lo_base_addr, hi_base_addr, lo_val, hi_val);
break;
-
default:
break;
}
--
2.17.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
2019-12-17 17:28 ` [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm Jonathan Kim
@ 2019-12-17 22:18 ` Felix Kuehling
0 siblings, 0 replies; 3+ messages in thread
From: Felix Kuehling @ 2019-12-17 22:18 UTC (permalink / raw)
To: Jonathan Kim, amd-gfx; +Cc: Alexander.Deucher
On 2019-12-17 12:28, Jonathan Kim wrote:
> The DF routines to arm xGMI performance will attempt to re-arm both on
> performance monitoring start and read on initial failure to arm.
>
> Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 153 ++++++++++++++++++++-------
> 1 file changed, 117 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index 4043ebcea5de..af445054305f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -162,25 +162,45 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
> }
>
> /*
> - * df_v3_6_perfmon_wreg - write to perfmon lo and hi
> - *
> - * required to be atomic. no mmio method provided so subsequent reads after
> - * data writes cannot occur to preserve data fabrics finite state machine.
> + * retry arming counters every 100 usecs within 1 millisecond interval.
> + * if retry fails after time out, return error.
> */
> -static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
> - uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
> +#define ARM_RETRY_USEC_TIMEOUT 1000
> +#define ARM_RETRY_USEC_INTERVAL 100
> +static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
> + uint32_t lo_addr, uint32_t lo_val,
> + uint32_t hi_addr, uint32_t hi_val)
> {
> unsigned long flags, address, data;
> + uint32_t lo_val_rb, hi_val_rb;
> + int countdown = ARM_RETRY_USEC_TIMEOUT;
>
> address = adev->nbio.funcs->get_pcie_index_offset(adev);
> data = adev->nbio.funcs->get_pcie_data_offset(adev);
>
> spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> - WREG32(address, lo_addr);
> - WREG32(data, lo_val);
> - WREG32(address, hi_addr);
> - WREG32(data, hi_val);
> +
> + while (countdown) {
> + WREG32(address, lo_addr);
> + WREG32(data, lo_val);
> + WREG32(address, hi_addr);
> + WREG32(data, hi_val);
> +
> + WREG32(address, lo_addr);
> + lo_val_rb = RREG32(data);
> + WREG32(address, hi_addr);
> + hi_val_rb = RREG32(data);
> +
> + if (lo_val == lo_val_rb && hi_val == hi_val_rb)
> + break;
> +
> + countdown -= ARM_RETRY_USEC_INTERVAL;
> + udelay(ARM_RETRY_USEC_INTERVAL);
> + }
> +
> spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
I don't think it's a good idea to hold the spin lock for the entire
duration of this retry loop. Maybe put that inside the loop and release
the lock while waiting in udelay.
> +
> + return countdown > 0 ? 0 : -ETIME;
> }
>
> /* get the number of df counters available */
> @@ -334,20 +354,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
> switch (target_cntr) {
>
> case 0:
> - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
> - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
> + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
> + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
> break;
> case 1:
> - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
> - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
> + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
> + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
> break;
> case 2:
> - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
> - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
> + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
> + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
> break;
> case 3:
> - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
> - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
> + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
> + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
> break;
>
> }
> @@ -422,6 +442,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
> return -ENOSPC;
> }
>
> +#define DEFERRED_ARM_MASK (1 << 31)
> +static int df_v3_6_pmc_defer_cntr(struct amdgpu_device *adev,
> + uint64_t config, int err)
Consider renaming this function. I found its usage confusing because
it's used both to defer arming and to clear the deferred flag. Maybe
df_v3_6_pmc_set_deferred. The "err" parameter could be named "defer" to
better indicate its meaning, and it could be made bool, since that's
what's returned by the counterpart df_v3_6_pmc_is_deferred.
> +{
> + int target_cntr;
> +
> + target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> + if (target_cntr < 0)
> + return -EINVAL;
> +
> + if (err)
> + adev->df_perfmon_config_assign_mask[target_cntr] |=
> + DEFERRED_ARM_MASK;
> + else
> + adev->df_perfmon_config_assign_mask[target_cntr] &=
> + ~DEFERRED_ARM_MASK;
> +
> + return 0;
> +}
> +
> +static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
> + uint64_t config)
> +{
> + int target_cntr;
> +
> + target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
> +
> + if (target_cntr < 0)
> + return -EINVAL;
> +
> + return (adev->df_perfmon_config_assign_mask[target_cntr]
> + & DEFERRED_ARM_MASK);
> +
> +}
> +
> /* release performance counter */
> static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
> uint64_t config)
> @@ -433,7 +489,7 @@ static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
> }
>
>
> -static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
> +static int df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
> uint64_t config)
> {
> uint32_t lo_base_addr, hi_base_addr;
> @@ -442,38 +498,54 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
> &hi_base_addr);
>
> if ((lo_base_addr == 0) || (hi_base_addr == 0))
> - return;
> + return -EINVAL;
>
> - df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
> + return df_v3_6_perfmon_arm_with_retry(adev, lo_base_addr, 0,
> + hi_base_addr, 0);
> }
>
> static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
> int is_enable)
> {
> uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> - int ret = 0;
> + int err = 0, ret = 0;
>
> switch (adev->asic_type) {
> case CHIP_VEGA20:
> -
> - df_v3_6_reset_perfmon_cntr(adev, config);
> -
> if (is_enable) {
> ret = df_v3_6_pmc_add_cntr(adev, config);
> - } else {
> - ret = df_v3_6_pmc_get_ctrl_settings(adev,
> + return ret;
This could be one line
return df_v3_6_pmc_add_cntr(adev, config);
> + }
> +
> + err = df_v3_6_reset_perfmon_cntr(adev, config);
> +
> + ret = df_v3_6_pmc_defer_cntr(adev, config, err);
This is confusing. This looks like you're always deferring, but in fact
this is conditional based on err. See my suggestion about renaming the
function above.
> +
> + if (err || ret)
> + return ret;
> +
> + ret = df_v3_6_pmc_get_ctrl_settings(adev,
> config,
> &lo_base_addr,
> &hi_base_addr,
> &lo_val,
> &hi_val);
>
> - if (ret)
> - return ret;
> + if (ret)
> + return ret;
>
> - df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
> - hi_base_addr, hi_val);
> - }
> + /*
> + * if arm with retries fail, mark reserved counter high bit to
> + * indicate that event arm has failed. retry will occur
> + * during pmc_count in this case.
You're explaining the implementation of df_v3_6_pmc_defer_cntr rather
than the abstract interface. That's more confusing than helpful.
> + */
> + err = df_v3_6_perfmon_arm_with_retry(adev,
> + lo_base_addr,
> + lo_val,
> + hi_base_addr,
> + hi_val);
> +
> + ret = df_v3_6_pmc_defer_cntr(adev, config, err);
>
> break;
> default:
> @@ -501,7 +573,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
> if (ret)
> return ret;
>
> - df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
> + df_v3_6_reset_perfmon_cntr(adev, config);
Is this guaranteed to succeed? If not, we should check the return value.
>
> if (is_disable)
> df_v3_6_pmc_release_cntr(adev, config);
> @@ -518,18 +590,28 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
> uint64_t config,
> uint64_t *count)
> {
> - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
> + uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
> + int rearm_err = 0;
> *count = 0;
>
> switch (adev->asic_type) {
> case CHIP_VEGA20:
> -
> df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
> &hi_base_addr);
>
> if ((lo_base_addr == 0) || (hi_base_addr == 0))
> return;
>
> + /* rearm the counter or throw away count value on failure */
> + if (df_v3_6_pmc_is_deferred(adev, config)) {
> + rearm_err = df_v3_6_perfmon_arm_with_retry(
> + adev, lo_base_addr, lo_val,
> + hi_base_addr, hi_val);
Is it a good idea to retry here? I believe this is called under a
spin-lock. The get_count should usually be fast. Is it worth retrying
and potentially delaying the entire perf framework for a millisecond?
Regards,
Felix
> + }
> +
> + if (rearm_err)
> + return;
> +
> df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
> hi_base_addr, &hi_val);
>
> @@ -542,7 +624,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
> config, lo_base_addr, hi_base_addr, lo_val, hi_val);
>
> break;
> -
> default:
> break;
> }
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2019-12-17 22:18 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-12-17 17:28 [PATCH 1/2] drm/amdgpu: add perfmons accessible during df c-states Jonathan Kim
2019-12-17 17:28 ` [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm Jonathan Kim
2019-12-17 22:18 ` Felix Kuehling
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.