* Various tidy-ups with SI DMA/DPM/SMC/IH code
@ 2016-09-06 16:18 Tom St Denis
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
0 siblings, 1 reply; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
Various cleanups including simplifications, LOC reductions
and whitespace corrections.
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH 1/7] drm/amd/amdgpu: Tidy up SI DMA code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 2/7] drm/amd/amdgpu: Allow calling si_dpm_fini at any point Tom St Denis
` (8 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dma.c | 35 +++++------------------------------
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 377f4ae9e777..2abdaa681797 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -39,16 +39,11 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs/4];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs>>2];
}
static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
-
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
@@ -188,7 +183,6 @@ static int si_dma_start(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
-
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
ring->ready = true;
@@ -476,11 +470,10 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm_id < 8) {
+ if (vm_id < 8)
amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
+ else
amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
- }
amdgpu_ring_write(ring, pd_addr >> 12);
/* bits 0-7 are the VM contexts0-7 */
@@ -558,14 +551,9 @@ static int si_dma_sw_fini(void *handle)
static int si_dma_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = si_dma_start(adev);
- if (r)
- return r;
-
- return r;
+ return si_dma_start(adev);
}
static int si_dma_hw_fini(void *handle)
@@ -605,13 +593,10 @@ static bool si_dma_is_idle(void *handle)
static int si_dma_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SRBM_STATUS2) & (DMA_BUSY_MASK | DMA1_BUSY_MASK);
-
- if (!tmp)
+ if (si_dma_is_idle(handle))
return 0;
udelay(1);
}
@@ -674,11 +659,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u8 instance_id, queue_id;
-
- instance_id = (entry->ring_id & 0x3) >> 0;
- queue_id = (entry->ring_id & 0xc) >> 2;
-
amdgpu_fence_process(&adev->sdma.instance[0].ring);
return 0;
@@ -688,11 +668,6 @@ static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u8 instance_id, queue_id;
-
- instance_id = (entry->ring_id & 0x3) >> 0;
- queue_id = (entry->ring_id & 0xc) >> 2;
-
amdgpu_fence_process(&adev->sdma.instance[1].ring);
return 0;
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 2/7] drm/amd/amdgpu: Allow calling si_dpm_fini at any point
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2016-09-06 16:18 ` [PATCH 1/7] drm/amd/amdgpu: Tidy up SI DMA code Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 3/7] drm/amd/amdgpu: Clean up SI DPM table assignments Tom St Denis
` (7 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Allow calling fini even if ps array is not allocated.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index e90bb4423cc7..2a72b2b5801d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7519,9 +7519,9 @@ static void si_dpm_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- kfree(adev->pm.dpm.ps[i].ps_priv);
- }
+ if (adev->pm.dpm.ps)
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ kfree(adev->pm.dpm.ps[i].ps_priv);
kfree(adev->pm.dpm.ps);
kfree(adev->pm.dpm.priv);
kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 3/7] drm/amd/amdgpu: Clean up SI DPM table assignments
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2016-09-06 16:18 ` [PATCH 1/7] drm/amd/amdgpu: Tidy up SI DMA code Tom St Denis
2016-09-06 16:18 ` [PATCH 2/7] drm/amd/amdgpu: Allow calling si_dpm_fini at any point Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 4/7] drm/amd/amdgpu: Correct whitespace in SI DPM code Tom St Denis
` (6 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Hoist common assignments out of cases.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dpm.c | 42 +++++++++----------------------------
1 file changed, 10 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 2a72b2b5801d..8d623912891c 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -2014,39 +2014,28 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
break;
}
} else if (adev->asic_type == CHIP_PITCAIRN) {
+ si_pi->cac_weights = cac_weights_pitcairn;
+ si_pi->lcac_config = lcac_pitcairn;
+ si_pi->cac_override = cac_override_pitcairn;
+ si_pi->powertune_data = &powertune_data_pitcairn;
+
switch (adev->pdev->device) {
case 0x6810:
case 0x6818:
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_curacao_xt;
update_dte_from_pl2 = true;
break;
case 0x6819:
case 0x6811:
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_curacao_pro;
update_dte_from_pl2 = true;
break;
case 0x6800:
case 0x6806:
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_neptune_xt;
update_dte_from_pl2 = true;
break;
default:
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_pitcairn;
break;
}
@@ -2102,16 +2091,17 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
break;
}
} else if (adev->asic_type == CHIP_OLAND) {
+ si_pi->lcac_config = lcac_mars_pro;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_mars_pro;
+ si_pi->dte_data = dte_data_mars_pro;
+
switch (adev->pdev->device) {
case 0x6601:
case 0x6621:
case 0x6603:
case 0x6605:
si_pi->cac_weights = cac_weights_mars_pro;
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
update_dte_from_pl2 = true;
break;
case 0x6600:
@@ -2119,28 +2109,16 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
case 0x6620:
case 0x6604:
si_pi->cac_weights = cac_weights_mars_xt;
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
update_dte_from_pl2 = true;
break;
case 0x6611:
case 0x6613:
case 0x6608:
si_pi->cac_weights = cac_weights_oland_pro;
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
update_dte_from_pl2 = true;
break;
case 0x6610:
si_pi->cac_weights = cac_weights_oland_xt;
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
update_dte_from_pl2 = true;
break;
default:
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 4/7] drm/amd/amdgpu: Correct whitespace in SI DPM code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (2 preceding siblings ...)
2016-09-06 16:18 ` [PATCH 3/7] drm/amd/amdgpu: Clean up SI DPM table assignments Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 5/7] drm/amd/amdgpu: Tidy up SI IH code Tom St Denis
` (5 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Replace 8 spaces with tabs, correct {} braces, etc.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dpm.c | 441 +++++++++++++++++-------------------
1 file changed, 209 insertions(+), 232 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8d623912891c..8e6bbaf380d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -84,11 +84,11 @@ union fan_info {
};
union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
@@ -1852,8 +1852,8 @@ extern u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
{
- struct si_power_info *pi = adev->pm.dpm.priv;
- return pi;
+ struct si_power_info *pi = adev->pm.dpm.priv;
+ return pi;
}
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
@@ -1954,23 +1954,23 @@ static void si_update_dte_from_pl2(struct amdgpu_device *adev,
struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
{
- struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+ struct rv7xx_power_info *pi = adev->pm.dpm.priv;
- return pi;
+ return pi;
}
struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
{
- struct ni_power_info *pi = adev->pm.dpm.priv;
+ struct ni_power_info *pi = adev->pm.dpm.priv;
- return pi;
+ return pi;
}
struct si_ps *si_get_ps(struct amdgpu_ps *aps)
{
- struct si_ps *ps = aps->ps_priv;
+ struct si_ps *ps = aps->ps_priv;
- return ps;
+ return ps;
}
static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
@@ -2147,7 +2147,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
si_pi->enable_dte = false;
if (si_pi->powertune_data->enable_powertune_by_default) {
- ni_pi->enable_power_containment= true;
+ ni_pi->enable_power_containment = true;
ni_pi->enable_cac = true;
if (si_pi->dte_data.enable_dte_by_default) {
si_pi->enable_dte = true;
@@ -2426,13 +2426,12 @@ static int si_populate_power_containment_values(struct amdgpu_device *adev,
if ((max_ps_percent == 0) ||
(prev_sclk == max_sclk) ||
- disable_uvd_power_tune) {
+ disable_uvd_power_tune)
min_sclk = max_sclk;
- } else if (i == 1) {
+ else if (i == 1)
min_sclk = prev_sclk;
- } else {
+ else
min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
- }
if (min_sclk < state->performance_levels[0].sclk)
min_sclk = state->performance_levels[0].sclk;
@@ -2632,7 +2631,6 @@ static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
u32 i;
u32 v0_loadline;
-
if (table == NULL)
return -EINVAL;
@@ -3079,14 +3077,14 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
{
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we never hit the non-gddr5 limit so disable it */
- u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
- return true;
- else
- return false;
+ if (vblank_time < switch_limit)
+ return true;
+ else
+ return false;
}
@@ -3099,47 +3097,47 @@ static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
u32 mc_cg_config;
switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
break;
- default:
+ default:
return -EINVAL;
}
switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
@@ -3158,9 +3156,9 @@ static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
static void ni_update_current_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- struct si_ps *new_ps = si_get_ps(rps);
+ struct si_ps *new_ps = si_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
@@ -3170,9 +3168,9 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
static void ni_update_requested_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
- struct si_ps *new_ps = si_get_ps(rps);
+ struct si_ps *new_ps = si_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
@@ -3183,8 +3181,8 @@ static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
struct amdgpu_ps *new_ps,
struct amdgpu_ps *old_ps)
{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
@@ -3201,8 +3199,8 @@ static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
struct amdgpu_ps *new_ps,
struct amdgpu_ps *old_ps)
{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
@@ -3217,134 +3215,133 @@ static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
{
- unsigned int i;
+ unsigned int i;
- for (i = 0; i < table->count; i++) {
- if (voltage <= table->entries[i].value)
- return table->entries[i].value;
- }
+ for (i = 0; i < table->count; i++)
+ if (voltage <= table->entries[i].value)
+ return table->entries[i].value;
- return table->entries[table->count - 1].value;
+ return table->entries[table->count - 1].value;
}
static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
- u32 max_clock, u32 requested_clock)
+ u32 max_clock, u32 requested_clock)
{
- unsigned int i;
+ unsigned int i;
- if ((clocks == NULL) || (clocks->count == 0))
- return (requested_clock < max_clock) ? requested_clock : max_clock;
+ if ((clocks == NULL) || (clocks->count == 0))
+ return (requested_clock < max_clock) ? requested_clock : max_clock;
- for (i = 0; i < clocks->count; i++) {
- if (clocks->values[i] >= requested_clock)
- return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
- }
+ for (i = 0; i < clocks->count; i++) {
+ if (clocks->values[i] >= requested_clock)
+ return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+ }
- return (clocks->values[clocks->count - 1] < max_clock) ?
- clocks->values[clocks->count - 1] : max_clock;
+ return (clocks->values[clocks->count - 1] < max_clock) ?
+ clocks->values[clocks->count - 1] : max_clock;
}
static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
- u32 max_mclk, u32 requested_mclk)
+ u32 max_mclk, u32 requested_mclk)
{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
- max_mclk, requested_mclk);
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+ max_mclk, requested_mclk);
}
static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
- u32 max_sclk, u32 requested_sclk)
+ u32 max_sclk, u32 requested_sclk)
{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
- max_sclk, requested_sclk);
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+ max_sclk, requested_sclk);
}
void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
- u32 *max_clock)
+ u32 *max_clock)
{
- u32 i, clock = 0;
+ u32 i, clock = 0;
- if ((table == NULL) || (table->count == 0)) {
- *max_clock = clock;
- return;
- }
+ if ((table == NULL) || (table->count == 0)) {
+ *max_clock = clock;
+ return;
+ }
- for (i = 0; i < table->count; i++) {
- if (clock < table->entries[i].clk)
- clock = table->entries[i].clk;
- }
- *max_clock = clock;
+ for (i = 0; i < table->count; i++) {
+ if (clock < table->entries[i].clk)
+ clock = table->entries[i].clk;
+ }
+ *max_clock = clock;
}
static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
- u32 clock, u16 max_voltage, u16 *voltage)
+ u32 clock, u16 max_voltage, u16 *voltage)
{
- u32 i;
+ u32 i;
- if ((table == NULL) || (table->count == 0))
- return;
+ if ((table == NULL) || (table->count == 0))
+ return;
- for (i= 0; i < table->count; i++) {
- if (clock <= table->entries[i].clk) {
- if (*voltage < table->entries[i].v)
- *voltage = (u16)((table->entries[i].v < max_voltage) ?
- table->entries[i].v : max_voltage);
- return;
- }
- }
+ for (i= 0; i < table->count; i++) {
+ if (clock <= table->entries[i].clk) {
+ if (*voltage < table->entries[i].v)
+ *voltage = (u16)((table->entries[i].v < max_voltage) ?
+ table->entries[i].v : max_voltage);
+ return;
+ }
+ }
- *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+ *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
}
static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
- const struct amdgpu_clock_and_voltage_limits *max_limits,
- struct rv7xx_pl *pl)
+ const struct amdgpu_clock_and_voltage_limits *max_limits,
+ struct rv7xx_pl *pl)
{
- if ((pl->mclk == 0) || (pl->sclk == 0))
- return;
+ if ((pl->mclk == 0) || (pl->sclk == 0))
+ return;
- if (pl->mclk == pl->sclk)
- return;
+ if (pl->mclk == pl->sclk)
+ return;
- if (pl->mclk > pl->sclk) {
- if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
- pl->sclk = btc_get_valid_sclk(adev,
- max_limits->sclk,
- (pl->mclk +
- (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
- adev->pm.dpm.dyn_state.mclk_sclk_ratio);
- } else {
- if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
- pl->mclk = btc_get_valid_mclk(adev,
- max_limits->mclk,
- pl->sclk -
- adev->pm.dpm.dyn_state.sclk_mclk_delta);
- }
+ if (pl->mclk > pl->sclk) {
+ if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+ pl->sclk = btc_get_valid_sclk(adev,
+ max_limits->sclk,
+ (pl->mclk +
+ (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+ } else {
+ if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+ pl->mclk = btc_get_valid_mclk(adev,
+ max_limits->mclk,
+ pl->sclk -
+ adev->pm.dpm.dyn_state.sclk_mclk_delta);
+ }
}
static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
- u16 max_vddc, u16 max_vddci,
- u16 *vddc, u16 *vddci)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- u16 new_voltage;
-
- if ((0 == *vddc) || (0 == *vddci))
- return;
-
- if (*vddc > *vddci) {
- if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
- (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
- }
- } else {
- if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
- (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
- }
- }
+ u16 max_vddc, u16 max_vddci,
+ u16 *vddc, u16 *vddci)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ u16 new_voltage;
+
+ if ((0 == *vddc) || (0 == *vddci))
+ return;
+
+ if (*vddc > *vddci) {
+ if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+ (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+ }
+ } else {
+ if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+ (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+ }
+ }
}
static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
@@ -3626,9 +3623,9 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
- for (i = 0; i < ps->performance_level_count; i++)
- btc_adjust_clock_combinations(adev, max_limits,
- &ps->performance_levels[i]);
+ for (i = 0; i < ps->performance_level_count; i++)
+ btc_adjust_clock_combinations(adev, max_limits,
+ &ps->performance_levels[i]);
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc < min_vce_voltage)
@@ -3767,7 +3764,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
case 0:
default:
want_thermal_protection = false;
- break;
+ break;
case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
@@ -3969,7 +3966,7 @@ static int si_process_firmware_header(struct amdgpu_device *adev)
if (ret)
return ret;
- si_pi->state_table_start = tmp;
+ si_pi->state_table_start = tmp;
ret = si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
@@ -4128,7 +4125,7 @@ static void si_program_response_times(struct amdgpu_device *adev)
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+ backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
@@ -4367,14 +4364,11 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
static int si_upload_firmware(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
si_reset_smc(adev);
si_stop_smc_clock(adev);
- ret = si_load_smc_ucode(adev, si_pi->sram_end);
-
- return ret;
+ return si_load_smc_ucode(adev, si_pi->sram_end);
}
static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
@@ -4790,7 +4784,7 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
amdgpu_atombios_set_engine_dram_timings(adev,
pl->sclk,
- pl->mclk);
+ pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -4825,7 +4819,7 @@ static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
si_pi->sram_end);
if (ret)
break;
- }
+ }
return ret;
}
@@ -4938,9 +4932,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
reg = CG_R(0xffff) | CG_L(0);
table->initialState.levels[0].aT = cpu_to_be32(reg);
-
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
-
table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
@@ -5302,9 +5294,9 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(2);
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5411,15 +5403,15 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
tmp = freq_nom / reference_clock;
tmp = tmp * tmp;
if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
u32 clks = reference_clock * 5 / ss.rate;
u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
- mpll_ss1 &= ~CLKV_MASK;
- mpll_ss1 |= CLKV(clkv);
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
- mpll_ss2 &= ~CLKS_MASK;
- mpll_ss2 |= CLKS(clks);
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
}
}
@@ -5746,7 +5738,7 @@ static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
ni_pi->enable_power_containment = false;
ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
- if (ret)
+ if (ret)
ni_pi->enable_sq_ramping = false;
return si_populate_smc_t(adev, amdgpu_state, smc_state);
@@ -5771,10 +5763,8 @@ static int si_upload_sw_state(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
- state_size, si_pi->sram_end);
-
- return ret;
+ return si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+ state_size, si_pi->sram_end);
}
static int si_upload_ulv_state(struct amdgpu_device *adev)
@@ -5915,46 +5905,46 @@ static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
case MC_SEQ_RAS_TIMING:
*out_reg = MC_SEQ_RAS_TIMING_LP;
break;
- case MC_SEQ_CAS_TIMING:
+ case MC_SEQ_CAS_TIMING:
*out_reg = MC_SEQ_CAS_TIMING_LP;
break;
- case MC_SEQ_MISC_TIMING:
+ case MC_SEQ_MISC_TIMING:
*out_reg = MC_SEQ_MISC_TIMING_LP;
break;
- case MC_SEQ_MISC_TIMING2:
+ case MC_SEQ_MISC_TIMING2:
*out_reg = MC_SEQ_MISC_TIMING2_LP;
break;
- case MC_SEQ_RD_CTL_D0:
+ case MC_SEQ_RD_CTL_D0:
*out_reg = MC_SEQ_RD_CTL_D0_LP;
break;
- case MC_SEQ_RD_CTL_D1:
+ case MC_SEQ_RD_CTL_D1:
*out_reg = MC_SEQ_RD_CTL_D1_LP;
break;
- case MC_SEQ_WR_CTL_D0:
+ case MC_SEQ_WR_CTL_D0:
*out_reg = MC_SEQ_WR_CTL_D0_LP;
break;
- case MC_SEQ_WR_CTL_D1:
+ case MC_SEQ_WR_CTL_D1:
*out_reg = MC_SEQ_WR_CTL_D1_LP;
break;
- case MC_PMG_CMD_EMRS:
+ case MC_PMG_CMD_EMRS:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
break;
- case MC_PMG_CMD_MRS:
+ case MC_PMG_CMD_MRS:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
break;
- case MC_PMG_CMD_MRS1:
+ case MC_PMG_CMD_MRS1:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
break;
- case MC_SEQ_PMG_TIMING:
+ case MC_SEQ_PMG_TIMING:
*out_reg = MC_SEQ_PMG_TIMING_LP;
break;
- case MC_PMG_CMD_MRS2:
+ case MC_PMG_CMD_MRS2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
break;
- case MC_SEQ_WR_CTL_2:
+ case MC_SEQ_WR_CTL_2:
*out_reg = MC_SEQ_WR_CTL_2_LP;
break;
- default:
+ default:
result = false;
break;
}
@@ -6041,19 +6031,19 @@ static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
- ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
- if (ret)
- goto init_mc_done;
+ ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+ if (ret)
+ goto init_mc_done;
- ret = si_copy_vbios_mc_reg_table(table, si_table);
- if (ret)
- goto init_mc_done;
+ ret = si_copy_vbios_mc_reg_table(table, si_table);
+ if (ret)
+ goto init_mc_done;
si_set_s0_mc_reg_index(si_table);
ret = si_set_mc_special_registers(adev, si_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
si_set_valid_flag(si_table);
@@ -6122,7 +6112,7 @@ static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SMC_SIslands_MCRegisters *mc_reg_table)
{
- struct si_ps *state = si_get_ps(amdgpu_state);
+ struct si_ps *state = si_get_ps(amdgpu_state);
int i;
for (i = 0; i < state->performance_level_count; i++) {
@@ -6173,7 +6163,7 @@ static int si_populate_mc_reg_table(struct amdgpu_device *adev,
static int si_upload_mc_reg_table(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state)
{
- struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+ struct si_ps *new_state = si_get_ps(amdgpu_new_state);
struct si_power_info *si_pi = si_get_pi(adev);
u32 address = si_pi->mc_reg_table_start +
offsetof(SMC_SIslands_MCRegisters,
@@ -6184,26 +6174,24 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
-
return si_copy_bytes_to_smc(adev, address,
(u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
si_pi->sram_end);
-
}
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
- else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ else
+ WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
- struct si_ps *state = si_get_ps(amdgpu_state);
+ struct si_ps *state = si_get_ps(amdgpu_state);
int i;
u16 pcie_speed, max_speed = 0;
@@ -6525,25 +6513,17 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
-
fan_table.slope1 = cpu_to_be16(slope1);
fan_table.slope2 = cpu_to_be16(slope2);
-
fan_table.fdo_min = cpu_to_be16(fdo_min);
-
fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
-
fan_table.hys_up = cpu_to_be16(1);
-
fan_table.hys_slope = cpu_to_be16(1);
-
fan_table.temp_resp_lim = cpu_to_be16(5);
-
reference_clock = amdgpu_asic_get_xclk(adev);
fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
reference_clock) / 1600);
-
fan_table.fdo_max = cpu_to_be16((u16)duty100);
tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
@@ -6916,9 +6896,7 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_start_dpm(adev);
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
si_thermal_start_thermal_controller(adev);
-
ni_update_current_ps(adev, boot_ps);
return 0;
@@ -6972,7 +6950,6 @@ static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
struct amdgpu_ps *new_ps = &requested_ps;
ni_update_requested_ps(adev, new_ps);
-
si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
return 0;
@@ -7232,8 +7209,8 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
}
union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
};
static int si_parse_power_table(struct amdgpu_device *adev)
@@ -7248,7 +7225,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct si_ps *ps;
@@ -7896,46 +7873,46 @@ static int si_dpm_get_temp(struct amdgpu_device *adev)
static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
- if (low)
- return requested_state->performance_levels[0].sclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
- if (low)
- return requested_state->performance_levels[0].mclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
static void si_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *ps = si_get_ps(rps);
- struct rv7xx_pl *pl;
- int i;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->performance_level_count; i++) {
- pl = &ps->performance_levels[i];
- if (adev->asic_type >= CHIP_TAHITI)
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *ps = si_get_ps(rps);
+ struct rv7xx_pl *pl;
+ int i;
+
+ amdgpu_dpm_print_class_info(rps->class, rps->class2);
+ amdgpu_dpm_print_cap_info(rps->caps);
+ DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ if (adev->asic_type >= CHIP_TAHITI)
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
- else
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ else
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
- }
- amdgpu_dpm_print_ps_status(adev, rps);
+ }
+ amdgpu_dpm_print_ps_status(adev, rps);
}
static int si_dpm_early_init(void *handle)
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 5/7] drm/amd/amdgpu: Tidy up SI IH code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (3 preceding siblings ...)
2016-09-06 16:18 ` [PATCH 4/7] drm/amd/amdgpu: Correct whitespace in SI DPM code Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 6/7] drm/amd/amdgpu: Tidy up SI SMC code Tom St Denis
` (4 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_ih.c | 30 ++++++++----------------------
1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 994ff02db013..8fae3d4a2360 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -57,7 +57,6 @@ static void si_ih_disable_interrupts(struct amdgpu_device *adev)
static int si_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -72,18 +71,15 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
- ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
- IH_WPTR_OVERFLOW_CLEAR |
- (rb_bufsz << 1));
-
- ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+ ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1) |
+ IH_WPTR_WRITEBACK_ENABLE;
wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
-
WREG32(IH_RB_CNTL, ih_rb_cntl);
-
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
@@ -93,10 +89,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
WREG32(IH_CNTL, ih_cntl);
pci_set_master(adev->pdev);
-
si_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
static void si_ih_irq_disable(struct amdgpu_device *adev)
@@ -165,9 +160,7 @@ static int si_ih_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_irq_init(adev);
-
- return r;
+ return amdgpu_irq_init(adev);
}
static int si_ih_sw_fini(void *handle)
@@ -182,14 +175,9 @@ static int si_ih_sw_fini(void *handle)
static int si_ih_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = si_ih_irq_init(adev);
- if (r)
- return r;
-
- return 0;
+ return si_ih_irq_init(adev);
}
static int si_ih_hw_fini(void *handle)
@@ -229,12 +217,10 @@ static bool si_ih_is_idle(void *handle)
static int si_ih_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
- if (!tmp)
+ if (si_ih_is_idle(handle))
return 0;
udelay(1);
}
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 6/7] drm/amd/amdgpu: Tidy up SI SMC code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (4 preceding siblings ...)
2016-09-06 16:18 ` [PATCH 5/7] drm/amd/amdgpu: Tidy up SI IH code Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
2016-09-06 16:18 ` [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct Tom St Denis
` (3 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
As well as merge SMC clock functions into one to reduce LOC.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6 +++---
drivers/gpu/drm/amd/amdgpu/si_smc.c | 25 ++++++++-----------------
drivers/gpu/drm/amd/amdgpu/sislands_smc.h | 3 +--
3 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8e6bbaf380d0..7b9debefd64a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3944,13 +3944,13 @@ static void si_dpm_start_smc(struct amdgpu_device *adev)
{
si_program_jump_on_start(adev);
si_start_smc(adev);
- si_start_smc_clock(adev);
+ si_smc_clock(adev, true);
}
static void si_dpm_stop_smc(struct amdgpu_device *adev)
{
si_reset_smc(adev);
- si_stop_smc_clock(adev);
+ si_smc_clock(adev, false);
}
static int si_process_firmware_header(struct amdgpu_device *adev)
@@ -4366,7 +4366,7 @@ static int si_upload_firmware(struct amdgpu_device *adev)
struct si_power_info *si_pi = si_get_pi(adev);
si_reset_smc(adev);
- si_stop_smc_clock(adev);
+ si_smc_clock(adev, false);
return si_load_smc_ucode(adev, si_pi->sram_end);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
index c1c259464ae7..2cc628cd247c 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -84,7 +84,6 @@ int si_copy_bytes_to_smc(struct amdgpu_device *adev,
goto done;
original_data = RREG32(SMC_IND_DATA_0);
-
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -94,7 +93,6 @@ int si_copy_bytes_to_smc(struct amdgpu_device *adev,
}
data <<= extra_shift;
-
data |= (original_data & ~((~0UL) << extra_shift));
ret = si_set_smc_sram_address(adev, addr, limit);
@@ -128,8 +126,8 @@ void si_reset_smc(struct amdgpu_device *adev)
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
- tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
- tmp |= RST_REG;
+ tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+ RST_REG;
WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
@@ -140,20 +138,14 @@ int si_program_jump_on_start(struct amdgpu_device *adev)
return si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}
-void si_stop_smc_clock(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-
- tmp |= CK_DISABLE;
-
- WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-void si_start_smc_clock(struct amdgpu_device *adev)
+void si_smc_clock(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
- tmp &= ~CK_DISABLE;
+ if (enable)
+ tmp &= ~CK_DISABLE;
+ else
+ tmp |= CK_DISABLE;
WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}
@@ -185,9 +177,8 @@ PPSMC_Result si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
break;
udelay(1);
}
- tmp = RREG32(SMC_RESP_0);
- return (PPSMC_Result)tmp;
+ return (PPSMC_Result)RREG32(SMC_RESP_0);
}
PPSMC_Result si_wait_for_smc_inactive(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
index 542ab46aeb40..c5b3377df2da 100644
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -408,8 +408,7 @@ int si_copy_bytes_to_smc(struct amdgpu_device *adev,
void si_start_smc(struct amdgpu_device *adev);
void si_reset_smc(struct amdgpu_device *adev);
int si_program_jump_on_start(struct amdgpu_device *adev);
-void si_stop_smc_clock(struct amdgpu_device *adev);
-void si_start_smc_clock(struct amdgpu_device *adev);
+void si_smc_clock(struct amdgpu_device *adev, bool enable);
bool si_is_smc_running(struct amdgpu_device *adev);
PPSMC_Result si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
PPSMC_Result si_wait_for_smc_inactive(struct amdgpu_device *adev);
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (5 preceding siblings ...)
2016-09-06 16:18 ` [PATCH 6/7] drm/amd/amdgpu: Tidy up SI SMC code Tom St Denis
@ 2016-09-06 16:18 ` Tom St Denis
[not found] ` <20160906161844.21370-8-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2016-09-07 2:19 ` Various tidy'ups with SI DMA/DPM/SMC/IH code Edward O'Callaghan
` (2 subsequent siblings)
9 siblings, 1 reply; 14+ messages in thread
From: Tom St Denis @ 2016-09-06 16:18 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
The tahiti_le structure is not currently used. Comment it out
to avoid warnings.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 7b9debefd64a..92a041755e8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -340,6 +340,7 @@ static const struct si_dte_data dte_data_tahiti =
false
};
+#if 0
static const struct si_dte_data dte_data_tahiti_le =
{
{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
@@ -357,6 +358,7 @@ static const struct si_dte_data dte_data_tahiti_le =
85,
true
};
+#endif
static const struct si_dte_data dte_data_tahiti_pro =
{
--
2.9.3
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: Various tidy'ups with SI DMA/DPM/SMC/IH code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (6 preceding siblings ...)
2016-09-06 16:18 ` [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct Tom St Denis
@ 2016-09-07 2:19 ` Edward O'Callaghan
2016-09-07 12:38 ` Huang Rui
2016-09-07 14:46 ` Deucher, Alexander
9 siblings, 0 replies; 14+ messages in thread
From: Edward O'Callaghan @ 2016-09-07 2:19 UTC (permalink / raw)
To: Tom St Denis, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
[-- Attachment #1.1.1: Type: text/plain, Size: 544 bytes --]
Patches 1-6 are,
Reviewed-by: Edward O'Callaghan <funfunctor-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
Patch 7 is,
Acked-by: Edward O'Callaghan <funfunctor-dczkZgxz+BNUPWh3PAxdjQ@public.gmane.org>
On 09/07/2016 02:18 AM, Tom St Denis wrote:
> Various cleanups including simplifications, LOC reductions
> and whitespace corrections.
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 819 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: Various tidy'ups with SI DMA/DPM/SMC/IH code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (7 preceding siblings ...)
2016-09-07 2:19 ` Various tidy'ups with SI DMA/DPM/SMC/IH code Edward O'Callaghan
@ 2016-09-07 12:38 ` Huang Rui
2016-09-07 14:46 ` Deucher, Alexander
9 siblings, 0 replies; 14+ messages in thread
From: Huang Rui @ 2016-09-07 12:38 UTC (permalink / raw)
To: Tom St Denis; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
On Tue, Sep 06, 2016 at 12:18:37PM -0400, Tom St Denis wrote:
> Various cleanups including simplifications, LOC reductions
> and whitespace corrections.
>
For the whole patch set:
Reviewed-by: Huang Rui <ray.huang@amd.com>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* RE: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct
[not found] ` <20160906161844.21370-8-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
@ 2016-09-07 14:45 ` Deucher, Alexander
[not found] ` <MWHPR12MB16946A5F5069CA4CAE4ABA7BF7F80-Gy0DoCVfaSW4WA4dJ5YXGAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
0 siblings, 1 reply; 14+ messages in thread
From: Deucher, Alexander @ 2016-09-07 14:45 UTC (permalink / raw)
To: 'Tom St Denis', amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
Cc: StDenis, Tom
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
> Of Tom St Denis
> Sent: Tuesday, September 06, 2016 12:19 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: StDenis, Tom
> Subject: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI
> DPM struct
>
> The tahiti_le structure is not currently used. Comment it out
> to avoid warnings.
>
> Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Might be worth assigning this on the Tahiti LE boards to see if it helps with the Tahiti LE bug.
Alex
> ---
> drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> index 7b9debefd64a..92a041755e8e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> @@ -340,6 +340,7 @@ static const struct si_dte_data dte_data_tahiti =
> false
> };
>
> +#if 0
> static const struct si_dte_data dte_data_tahiti_le =
> {
> { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
> @@ -357,6 +358,7 @@ static const struct si_dte_data dte_data_tahiti_le =
> 85,
> true
> };
> +#endif
>
> static const struct si_dte_data dte_data_tahiti_pro =
> {
> --
> 2.9.3
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* RE: Various tidy'ups with SI DMA/DPM/SMC/IH code
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
` (8 preceding siblings ...)
2016-09-07 12:38 ` Huang Rui
@ 2016-09-07 14:46 ` Deucher, Alexander
9 siblings, 0 replies; 14+ messages in thread
From: Deucher, Alexander @ 2016-09-07 14:46 UTC (permalink / raw)
To: 'Tom St Denis', amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
> Of Tom St Denis
> Sent: Tuesday, September 06, 2016 12:19 PM
> To: amd-gfx@lists.freedesktop.org
> Subject: Various tidy'ups with SI DMA/DPM/SMC/IH code
>
> Various cleanups including simplifications, LOC reductions
> and whitespace corrections.
For the series:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct
[not found] ` <MWHPR12MB16946A5F5069CA4CAE4ABA7BF7F80-Gy0DoCVfaSW4WA4dJ5YXGAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2016-09-07 14:49 ` StDenis, Tom
[not found] ` <CY4PR12MB1768FBB1CC469E0C27182B9FF7F80-rpdhrqHFk06yjjPBNVDk/QdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
0 siblings, 1 reply; 14+ messages in thread
From: StDenis, Tom @ 2016-09-07 14:49 UTC (permalink / raw)
To: Deucher, Alexander, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
[-- Attachment #1.1: Type: text/plain, Size: 2012 bytes --]
Hi Alex,
Would love to but unfortunately I don't have a tahiti_le (or any SI) part to test it on.
Cheers,
Tom
________________________________
From: Deucher, Alexander
Sent: Wednesday, September 7, 2016 10:45
To: 'Tom St Denis'; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Cc: StDenis, Tom
Subject: RE: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org] On Behalf
> Of Tom St Denis
> Sent: Tuesday, September 06, 2016 12:19 PM
> To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> Cc: StDenis, Tom
> Subject: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI
> DPM struct
>
> The tahiti_le structure is not currently used. Comment it out
> to avoid warnings.
>
> Signed-off-by: Tom St Denis <tom.stdenis-5C7GfCeVMHo@public.gmane.org>
Might be worth assigning this on the Tahiti LE boards to see if it helps with the Tahiti LE bug.
Alex
> ---
> drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> index 7b9debefd64a..92a041755e8e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
> @@ -340,6 +340,7 @@ static const struct si_dte_data dte_data_tahiti =
> false
> };
>
> +#if 0
> static const struct si_dte_data dte_data_tahiti_le =
> {
> { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
> @@ -357,6 +358,7 @@ static const struct si_dte_data dte_data_tahiti_le =
> 85,
> true
> };
> +#endif
>
> static const struct si_dte_data dte_data_tahiti_pro =
> {
> --
> 2.9.3
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
[-- Attachment #1.2: Type: text/html, Size: 3547 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct
[not found] ` <CY4PR12MB1768FBB1CC469E0C27182B9FF7F80-rpdhrqHFk06yjjPBNVDk/QdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2016-09-08 1:51 ` Michel Dänzer
0 siblings, 0 replies; 14+ messages in thread
From: Michel Dänzer @ 2016-09-08 1:51 UTC (permalink / raw)
To: StDenis, Tom, Deucher, Alexander; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
On 07/09/16 11:49 PM, StDenis, Tom wrote:
>
> Would love to but unfortunately I don't have a tahiti_le (or any SI)
> part to test it on.
There are potential testers on
https://bugs.freedesktop.org/show_bug.cgi?id=60879 .
--
Earthling Michel Dänzer | http://www.amd.com
Libre software enthusiast | Mesa and X developer
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2016-09-08 1:51 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-09-06 16:18 Various tidy'ups with SI DMA/DPM/SMC/IH code Tom St Denis
[not found] ` <20160906161844.21370-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2016-09-06 16:18 ` [PATCH 1/7] drm/amd/amdgpu: Tidy up SI DMA code Tom St Denis
2016-09-06 16:18 ` [PATCH 2/7] drm/amd/amdgpu: Allow calling si_dpm_fini at any point Tom St Denis
2016-09-06 16:18 ` [PATCH 3/7] drm/amd/amdgpu: Clean up SI DPM table assignments Tom St Denis
2016-09-06 16:18 ` [PATCH 4/7] drm/amd/amdgpu: Correct whitespace in SI DPM code Tom St Denis
2016-09-06 16:18 ` [PATCH 5/7] drm/amd/amdgpu: Tidy up SI IH code Tom St Denis
2016-09-06 16:18 ` [PATCH 6/7] drm/amd/amdgpu: Tidy up SI SMC code Tom St Denis
2016-09-06 16:18 ` [PATCH 7/7] drm/amd/amdgpu: Comment out currently unused SI DPM struct Tom St Denis
[not found] ` <20160906161844.21370-8-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2016-09-07 14:45 ` Deucher, Alexander
[not found] ` <MWHPR12MB16946A5F5069CA4CAE4ABA7BF7F80-Gy0DoCVfaSW4WA4dJ5YXGAdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2016-09-07 14:49 ` StDenis, Tom
[not found] ` <CY4PR12MB1768FBB1CC469E0C27182B9FF7F80-rpdhrqHFk06yjjPBNVDk/QdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2016-09-08 1:51 ` Michel Dänzer
2016-09-07 2:19 ` Various tidy'ups with SI DMA/DPM/SMC/IH code Edward O'Callaghan
2016-09-07 12:38 ` Huang Rui
2016-09-07 14:46 ` Deucher, Alexander
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.