* [PATCH] drm/amdgpu: revert calling smu msg in df callbacks
@ 2019-10-18 20:29 Kim, Jonathan
[not found] ` <20191018202856.27133-1-jonathan.kim-5C7GfCeVMHo@public.gmane.org>
0 siblings, 1 reply; 2+ messages in thread
From: Kim, Jonathan @ 2019-10-18 20:29 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Kuehling, Felix, Kim, Jonathan
reverting the following changes:
commit 7dd2eb31fcd5 ("drm/amdgpu: fix compiler warnings for df perfmons")
commit 54275cd1649f ("drm/amdgpu: disable c-states on xgmi perfmons")
perf events use spin-locks. Embedded SMU messages have potentially long
response times and could deadlock the system.
Change-Id: Ic36c35a62dec116d0a2f5b69c22af4d414458679
Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
---
drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 38 ++--------------------------
1 file changed, 2 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index e1cf7e9c616a..16fbd2bc8ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -93,21 +93,6 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
NULL
};
-static int df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
-{
- int r = 0;
-
- if (is_support_sw_smu(adev)) {
- r = smu_set_df_cstate(&adev->smu, allow);
- } else if (adev->powerplay.pp_funcs
- && adev->powerplay.pp_funcs->set_df_cstate) {
- r = adev->powerplay.pp_funcs->set_df_cstate(
- adev->powerplay.pp_handle, allow);
- }
-
- return r;
-}
-
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
uint32_t ficaa_val)
{
@@ -117,9 +102,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- return 0xFFFFFFFFFFFFFFFF;
-
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
WREG32(data, ficaa_val);
@@ -132,8 +114,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
-
return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
}
@@ -145,9 +125,6 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- return;
-
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
WREG32(data, ficaa_val);
@@ -157,9 +134,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
WREG32(data, ficadh_val);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/*
@@ -177,17 +153,12 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- return;
-
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
*lo_val = RREG32(data);
WREG32(address, hi_addr);
*hi_val = RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-
- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
}
/*
@@ -204,17 +175,12 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- return;
-
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
WREG32(data, lo_val);
WREG32(address, hi_addr);
WREG32(data, hi_val);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-
- df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
}
/* get the number of df counters available */
@@ -546,7 +512,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
*count = 0;
switch (adev->asic_type) {
--
2.17.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] drm/amdgpu: revert calling smu msg in df callbacks
[not found] ` <20191018202856.27133-1-jonathan.kim-5C7GfCeVMHo@public.gmane.org>
@ 2019-10-18 20:52 ` Kuehling, Felix
0 siblings, 0 replies; 2+ messages in thread
From: Kuehling, Felix @ 2019-10-18 20:52 UTC (permalink / raw)
To: Kim, Jonathan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
On 2019-10-18 4:29 p.m., Kim, Jonathan wrote:
> reverting the following changes:
> commit 7dd2eb31fcd5 ("drm/amdgpu: fix compiler warnings for df perfmons")
> commit 54275cd1649f ("drm/amdgpu: disable c-states on xgmi perfmons")
>
> perf events use spin-locks. Embedded SMU messages have potentially long
> response times and could deadlock the system.
>
> Change-Id: Ic36c35a62dec116d0a2f5b69c22af4d414458679
> Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
See one more comment inline below ...
> ---
> drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 38 ++--------------------------
> 1 file changed, 2 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index e1cf7e9c616a..16fbd2bc8ad1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -93,21 +93,6 @@ const struct attribute_group *df_v3_6_attr_groups[] = {
> NULL
> };
>
> -static int df_v3_6_set_df_cstate(struct amdgpu_device *adev, int allow)
> -{
> - int r = 0;
> -
> - if (is_support_sw_smu(adev)) {
> - r = smu_set_df_cstate(&adev->smu, allow);
> - } else if (adev->powerplay.pp_funcs
> - && adev->powerplay.pp_funcs->set_df_cstate) {
> - r = adev->powerplay.pp_funcs->set_df_cstate(
> - adev->powerplay.pp_handle, allow);
> - }
> -
> - return r;
> -}
> -
> static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
> uint32_t ficaa_val)
> {
> @@ -117,9 +102,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
> address = adev->nbio.funcs->get_pcie_index_offset(adev);
> data = adev->nbio.funcs->get_pcie_data_offset(adev);
>
> - if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
> - return 0xFFFFFFFFFFFFFFFF;
> -
> spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
> WREG32(data, ficaa_val);
> @@ -132,8 +114,6 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
>
> spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>
> - df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
> -
> return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
> }
>
> @@ -145,9 +125,6 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
> address = adev->nbio.funcs->get_pcie_index_offset(adev);
> data = adev->nbio.funcs->get_pcie_data_offset(adev);
>
> - if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
> - return;
> -
> spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
> WREG32(data, ficaa_val);
> @@ -157,9 +134,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
>
> WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
> WREG32(data, ficadh_val);
> - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
>
> - df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
> + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
> }
>
> /*
> @@ -177,17 +153,12 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
> address = adev->nbio.funcs->get_pcie_index_offset(adev);
> data = adev->nbio.funcs->get_pcie_data_offset(adev);
>
> - if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
> - return;
> -
> spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> WREG32(address, lo_addr);
> *lo_val = RREG32(data);
> WREG32(address, hi_addr);
> *hi_val = RREG32(data);
> spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
> -
> - df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
> }
>
> /*
> @@ -204,17 +175,12 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
> address = adev->nbio.funcs->get_pcie_index_offset(adev);
> data = adev->nbio.funcs->get_pcie_data_offset(adev);
>
> - if (df_v3_6_set_df_cstate(adev, DF_CSTATE_DISALLOW))
> - return;
> -
> spin_lock_irqsave(&adev->pcie_idx_lock, flags);
> WREG32(address, lo_addr);
> WREG32(data, lo_val);
> WREG32(address, hi_addr);
> WREG32(data, hi_val);
> spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
> -
> - df_v3_6_set_df_cstate(adev, DF_CSTATE_ALLOW);
> }
>
> /* get the number of df counters available */
> @@ -546,7 +512,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
> uint64_t config,
> uint64_t *count)
> {
> - uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
> + uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
This part looks like it was unrelated to the DF Cstate changes. If this
addressed a real problem, maybe it can be reintroduced with a separate
commit after this revert.
Regards,
Felix
> *count = 0;
>
> switch (adev->asic_type) {
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2019-10-18 20:52 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-18 20:29 [PATCH] drm/amdgpu: revert calling smu msg in df callbacks Kim, Jonathan
[not found] ` <20191018202856.27133-1-jonathan.kim-5C7GfCeVMHo@public.gmane.org>
2019-10-18 20:52 ` Kuehling, Felix
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.