* [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6
@ 2017-05-15 18:25 Tom St Denis
[not found] ` <20170515182557.10280-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
0 siblings, 1 reply; 5+ messages in thread
From: Tom St Denis @ 2017-05-15 18:25 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Swap uNN for uintNN_t.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 62 +++++++++++++++++------------------
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d860939152df..81f5aa9ff719 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -56,7 +56,7 @@ MODULE_FIRMWARE("radeon/si58_mc.bin");
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-static const u32 crtc_offsets[6] =
+static const uint32_t crtc_offsets[6] =
{
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -69,7 +69,7 @@ static const u32 crtc_offsets[6] =
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 blackout;
+ uint32_t blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
@@ -93,7 +93,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp;
+ uint32_t tmp;
/* unblackout the MC */
tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -165,7 +165,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
const __le32 *new_fw_data = NULL;
- u32 running;
+ uint32_t running;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
const struct mc_firmware_header_v1_0 *hdr;
@@ -241,7 +241,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
struct amdgpu_mode_mc_save save;
- u32 tmp;
+ uint32_t tmp;
int i, j;
/* Initialize HDP */
@@ -291,7 +291,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
+ uint32_t tmp;
int chansize, numchan;
tmp = RREG32(mmMC_ARB_RAMCFG);
@@ -398,7 +398,7 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
- u32 tmp;
+ uint32_t tmp;
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
@@ -424,7 +424,7 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+*/
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
- u32 tmp;
+ uint32_t tmp;
if (enable && !adev->mc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
@@ -509,7 +509,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (uint32_t)(adev->dummy_page.addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
WREG32(mmVM_CONTEXT0_CNTL,
VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
@@ -539,7 +539,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (uint32_t)(adev->dummy_page.addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
@@ -628,7 +628,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
- u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
+ uint64_t tmp = RREG32(mmMC_VM_FB_OFFSET);
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
} else
@@ -642,11 +642,11 @@ static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+ uint32_t status, uint32_t addr, uint32_t mc_client)
{
- u32 mc_id;
- u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
- u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ uint32_t mc_id;
+ uint32_t vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+ uint32_t protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
@@ -662,7 +662,7 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
}
/*
-static const u32 mc_cg_registers[] = {
+static const uint32_t mc_cg_registers[] = {
MC_HUB_MISC_HUB_CG,
MC_HUB_MISC_SIP_CG,
MC_HUB_MISC_VM_CG,
@@ -674,7 +674,7 @@ static const u32 mc_cg_registers[] = {
VM_L2_CG,
};
-static const u32 mc_cg_ls_en[] = {
+static const uint32_t mc_cg_ls_en[] = {
MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
@@ -686,7 +686,7 @@ static const u32 mc_cg_ls_en[] = {
VM_L2_CG__MEM_LS_ENABLE_MASK,
};
-static const u32 mc_cg_en[] = {
+static const uint32_t mc_cg_en[] = {
MC_HUB_MISC_HUB_CG__ENABLE_MASK,
MC_HUB_MISC_SIP_CG__ENABLE_MASK,
MC_HUB_MISC_VM_CG__ENABLE_MASK,
@@ -702,7 +702,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
bool enable)
{
int i;
- u32 orig, data;
+ uint32_t orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
@@ -719,7 +719,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
bool enable)
{
int i;
- u32 orig, data;
+ uint32_t orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
@@ -735,7 +735,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
bool enable)
{
- u32 orig, data;
+ uint32_t orig, data;
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
@@ -758,7 +758,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
bool enable)
{
- u32 orig, data;
+ uint32_t orig, data;
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -774,7 +774,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
bool enable)
{
- u32 orig, data;
+ uint32_t orig, data;
orig = data = RREG32(mmHDP_MEM_POWER_LS);
@@ -818,7 +818,7 @@ static int gmc_v6_0_early_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ uint32_t tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
@@ -972,7 +972,7 @@ static int gmc_v6_0_resume(void *handle)
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS);
+ uint32_t tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
@@ -984,7 +984,7 @@ static bool gmc_v6_0_is_idle(void *handle)
static int gmc_v6_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
+ uint32_t tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1005,8 +1005,8 @@ static int gmc_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_mc_save save;
- u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(mmSRBM_STATUS);
+ uint32_t srbm_soft_reset = 0;
+ uint32_t tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
@@ -1052,8 +1052,8 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp;
- u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ uint32_t tmp;
+ uint32_t bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -1088,7 +1088,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status;
+ uint32_t addr, status;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
--
2.12.0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/2] drm/amd/amdgpu: Clean up gmc6 wait_for_idle
[not found] ` <20170515182557.10280-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-15 18:25 ` Tom St Denis
[not found] ` <20170515182557.10280-2-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2017-05-16 10:51 ` [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6 Christian König
1 sibling, 1 reply; 5+ messages in thread
From: Tom St Denis @ 2017-05-15 18:25 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tom St Denis
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 81f5aa9ff719..27db0710e9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -984,16 +984,10 @@ static bool gmc_v6_0_is_idle(void *handle)
static int gmc_v6_0_wait_for_idle(void *handle)
{
unsigned i;
- uint32_t tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v6_0_is_idle(handle))
return 0;
udelay(1);
}
--
2.12.0
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6
[not found] ` <20170515182557.10280-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2017-05-15 18:25 ` [PATCH 2/2] drm/amd/amdgpu: Clean up gmc6 wait_for_idle Tom St Denis
@ 2017-05-16 10:51 ` Christian König
[not found] ` <2f57261b-d94a-9f2d-979d-74277bf29d64-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
1 sibling, 1 reply; 5+ messages in thread
From: Christian König @ 2017-05-16 10:51 UTC (permalink / raw)
To: Tom St Denis, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
Am 15.05.2017 um 20:25 schrieb Tom St Denis:
> Swap uNN for uintNN_t.
>
> Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Do we actually do anything other than busy-waiting for idle in any of
the modules' wait_for_idle callbacks?
If not, that might be a good candidate for a cleanup as well,
Christian.
> ---
> drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 62 +++++++++++++++++------------------
> 1 file changed, 31 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index d860939152df..81f5aa9ff719 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -56,7 +56,7 @@ MODULE_FIRMWARE("radeon/si58_mc.bin");
> #define MC_SEQ_MISC0__MT__DDR3 0xB0000000
>
>
> -static const u32 crtc_offsets[6] =
> +static const uint32_t crtc_offsets[6] =
> {
> SI_CRTC0_REGISTER_OFFSET,
> SI_CRTC1_REGISTER_OFFSET,
> @@ -69,7 +69,7 @@ static const u32 crtc_offsets[6] =
> static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
> struct amdgpu_mode_mc_save *save)
> {
> - u32 blackout;
> + uint32_t blackout;
>
> if (adev->mode_info.num_crtc)
> amdgpu_display_stop_mc_access(adev, save);
> @@ -93,7 +93,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
> static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
> struct amdgpu_mode_mc_save *save)
> {
> - u32 tmp;
> + uint32_t tmp;
>
> /* unblackout the MC */
> tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
> @@ -165,7 +165,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
> static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
> {
> const __le32 *new_fw_data = NULL;
> - u32 running;
> + uint32_t running;
> const __le32 *new_io_mc_regs = NULL;
> int i, regs_size, ucode_size;
> const struct mc_firmware_header_v1_0 *hdr;
> @@ -241,7 +241,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
> static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
> {
> struct amdgpu_mode_mc_save save;
> - u32 tmp;
> + uint32_t tmp;
> int i, j;
>
> /* Initialize HDP */
> @@ -291,7 +291,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
> static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
> {
>
> - u32 tmp;
> + uint32_t tmp;
> int chansize, numchan;
>
> tmp = RREG32(mmMC_ARB_RAMCFG);
> @@ -398,7 +398,7 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
> static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
> bool value)
> {
> - u32 tmp;
> + uint32_t tmp;
>
> tmp = RREG32(mmVM_CONTEXT1_CNTL);
> tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> @@ -424,7 +424,7 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
> +*/
> static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
> {
> - u32 tmp;
> + uint32_t tmp;
>
> if (enable && !adev->mc.prt_warning) {
> dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
> @@ -509,7 +509,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
> WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
> - (u32)(adev->dummy_page.addr >> 12));
> + (uint32_t)(adev->dummy_page.addr >> 12));
> WREG32(mmVM_CONTEXT0_CNTL2, 0);
> WREG32(mmVM_CONTEXT0_CNTL,
> VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
> @@ -539,7 +539,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>
> /* enable context1-15 */
> WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
> - (u32)(adev->dummy_page.addr >> 12));
> + (uint32_t)(adev->dummy_page.addr >> 12));
> WREG32(mmVM_CONTEXT1_CNTL2, 4);
> WREG32(mmVM_CONTEXT1_CNTL,
> VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
> @@ -628,7 +628,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
>
> /* base offset of vram pages */
> if (adev->flags & AMD_IS_APU) {
> - u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
> + uint64_t tmp = RREG32(mmMC_VM_FB_OFFSET);
> tmp <<= 22;
> adev->vm_manager.vram_base_offset = tmp;
> } else
> @@ -642,11 +642,11 @@ static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
> }
>
> static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
> - u32 status, u32 addr, u32 mc_client)
> + uint32_t status, uint32_t addr, uint32_t mc_client)
> {
> - u32 mc_id;
> - u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
> - u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
> + uint32_t mc_id;
> + uint32_t vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
> + uint32_t protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
> PROTECTIONS);
> char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
> (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
> @@ -662,7 +662,7 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
> }
>
> /*
> -static const u32 mc_cg_registers[] = {
> +static const uint32_t mc_cg_registers[] = {
> MC_HUB_MISC_HUB_CG,
> MC_HUB_MISC_SIP_CG,
> MC_HUB_MISC_VM_CG,
> @@ -674,7 +674,7 @@ static const u32 mc_cg_registers[] = {
> VM_L2_CG,
> };
>
> -static const u32 mc_cg_ls_en[] = {
> +static const uint32_t mc_cg_ls_en[] = {
> MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
> MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
> MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
> @@ -686,7 +686,7 @@ static const u32 mc_cg_ls_en[] = {
> VM_L2_CG__MEM_LS_ENABLE_MASK,
> };
>
> -static const u32 mc_cg_en[] = {
> +static const uint32_t mc_cg_en[] = {
> MC_HUB_MISC_HUB_CG__ENABLE_MASK,
> MC_HUB_MISC_SIP_CG__ENABLE_MASK,
> MC_HUB_MISC_VM_CG__ENABLE_MASK,
> @@ -702,7 +702,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
> bool enable)
> {
> int i;
> - u32 orig, data;
> + uint32_t orig, data;
>
> for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
> orig = data = RREG32(mc_cg_registers[i]);
> @@ -719,7 +719,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
> bool enable)
> {
> int i;
> - u32 orig, data;
> + uint32_t orig, data;
>
> for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
> orig = data = RREG32(mc_cg_registers[i]);
> @@ -735,7 +735,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
> static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
> bool enable)
> {
> - u32 orig, data;
> + uint32_t orig, data;
>
> orig = data = RREG32_PCIE(ixPCIE_CNTL2);
>
> @@ -758,7 +758,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
> static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
> bool enable)
> {
> - u32 orig, data;
> + uint32_t orig, data;
>
> orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
>
> @@ -774,7 +774,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
> static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
> bool enable)
> {
> - u32 orig, data;
> + uint32_t orig, data;
>
> orig = data = RREG32(mmHDP_MEM_POWER_LS);
>
> @@ -818,7 +818,7 @@ static int gmc_v6_0_early_init(void *handle)
> if (adev->flags & AMD_IS_APU) {
> adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
> } else {
> - u32 tmp = RREG32(mmMC_SEQ_MISC0);
> + uint32_t tmp = RREG32(mmMC_SEQ_MISC0);
> tmp &= MC_SEQ_MISC0__MT__MASK;
> adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
> }
> @@ -972,7 +972,7 @@ static int gmc_v6_0_resume(void *handle)
> static bool gmc_v6_0_is_idle(void *handle)
> {
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> - u32 tmp = RREG32(mmSRBM_STATUS);
> + uint32_t tmp = RREG32(mmSRBM_STATUS);
>
> if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
> SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
> @@ -984,7 +984,7 @@ static bool gmc_v6_0_is_idle(void *handle)
> static int gmc_v6_0_wait_for_idle(void *handle)
> {
> unsigned i;
> - u32 tmp;
> + uint32_t tmp;
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>
> for (i = 0; i < adev->usec_timeout; i++) {
> @@ -1005,8 +1005,8 @@ static int gmc_v6_0_soft_reset(void *handle)
> {
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> struct amdgpu_mode_mc_save save;
> - u32 srbm_soft_reset = 0;
> - u32 tmp = RREG32(mmSRBM_STATUS);
> + uint32_t srbm_soft_reset = 0;
> + uint32_t tmp = RREG32(mmSRBM_STATUS);
>
> if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
> srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
> @@ -1052,8 +1052,8 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
> unsigned type,
> enum amdgpu_interrupt_state state)
> {
> - u32 tmp;
> - u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> + uint32_t tmp;
> + uint32_t bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> @@ -1088,7 +1088,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
> struct amdgpu_irq_src *source,
> struct amdgpu_iv_entry *entry)
> {
> - u32 addr, status;
> + uint32_t addr, status;
>
> addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
> status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6
[not found] ` <2f57261b-d94a-9f2d-979d-74277bf29d64-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-05-16 10:54 ` Tom St Denis
0 siblings, 0 replies; 5+ messages in thread
From: Tom St Denis @ 2017-05-16 10:54 UTC (permalink / raw)
To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
On 16/05/17 06:51 AM, Christian König wrote:
> Am 15.05.2017 um 20:25 schrieb Tom St Denis:
>> Swap uNN for uintNN_t.
>>
>> Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
>
> Acked-by: Christian König <christian.koenig@amd.com>
>
> Do we actually do something else than busy waiting for idle in one of
> the modules in the wait_for_idle callback?
>
> If no, might be a good candidate for a cleanup as well,
> Christian.
I've done the "wait_for_idle" cleanup in other modules before where they
inlined the check (and I un-inlined it). They all seem to udelay(1)
while waiting.
Tom
>
>> ---
>> drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 62
>> +++++++++++++++++------------------
>> 1 file changed, 31 insertions(+), 31 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> index d860939152df..81f5aa9ff719 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> @@ -56,7 +56,7 @@ MODULE_FIRMWARE("radeon/si58_mc.bin");
>> #define MC_SEQ_MISC0__MT__DDR3 0xB0000000
>> -static const u32 crtc_offsets[6] =
>> +static const uint32_t crtc_offsets[6] =
>> {
>> SI_CRTC0_REGISTER_OFFSET,
>> SI_CRTC1_REGISTER_OFFSET,
>> @@ -69,7 +69,7 @@ static const u32 crtc_offsets[6] =
>> static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
>> struct amdgpu_mode_mc_save *save)
>> {
>> - u32 blackout;
>> + uint32_t blackout;
>> if (adev->mode_info.num_crtc)
>> amdgpu_display_stop_mc_access(adev, save);
>> @@ -93,7 +93,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device
>> *adev,
>> static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
>> struct amdgpu_mode_mc_save *save)
>> {
>> - u32 tmp;
>> + uint32_t tmp;
>> /* unblackout the MC */
>> tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
>> @@ -165,7 +165,7 @@ static int gmc_v6_0_init_microcode(struct
>> amdgpu_device *adev)
>> static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>> {
>> const __le32 *new_fw_data = NULL;
>> - u32 running;
>> + uint32_t running;
>> const __le32 *new_io_mc_regs = NULL;
>> int i, regs_size, ucode_size;
>> const struct mc_firmware_header_v1_0 *hdr;
>> @@ -241,7 +241,7 @@ static void gmc_v6_0_vram_gtt_location(struct
>> amdgpu_device *adev,
>> static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
>> {
>> struct amdgpu_mode_mc_save save;
>> - u32 tmp;
>> + uint32_t tmp;
>> int i, j;
>> /* Initialize HDP */
>> @@ -291,7 +291,7 @@ static void gmc_v6_0_mc_program(struct
>> amdgpu_device *adev)
>> static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>> {
>> - u32 tmp;
>> + uint32_t tmp;
>> int chansize, numchan;
>> tmp = RREG32(mmMC_ARB_RAMCFG);
>> @@ -398,7 +398,7 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct
>> amdgpu_device *adev,
>> static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device
>> *adev,
>> bool value)
>> {
>> - u32 tmp;
>> + uint32_t tmp;
>> tmp = RREG32(mmVM_CONTEXT1_CNTL);
>> tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
>> @@ -424,7 +424,7 @@ static void
>> gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
>> +*/
>> static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
>> {
>> - u32 tmp;
>> + uint32_t tmp;
>> if (enable && !adev->mc.prt_warning) {
>> dev_warn(adev->dev, "Disabling VM faults because of PRT
>> request!\n");
>> @@ -509,7 +509,7 @@ static int gmc_v6_0_gart_enable(struct
>> amdgpu_device *adev)
>> WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr
>> >> 12);
>> WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>> - (u32)(adev->dummy_page.addr >> 12));
>> + (uint32_t)(adev->dummy_page.addr >> 12));
>> WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> WREG32(mmVM_CONTEXT0_CNTL,
>> VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
>> @@ -539,7 +539,7 @@ static int gmc_v6_0_gart_enable(struct
>> amdgpu_device *adev)
>> /* enable context1-15 */
>> WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
>> - (u32)(adev->dummy_page.addr >> 12));
>> + (uint32_t)(adev->dummy_page.addr >> 12));
>> WREG32(mmVM_CONTEXT1_CNTL2, 4);
>> WREG32(mmVM_CONTEXT1_CNTL,
>> VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
>> @@ -628,7 +628,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device
>> *adev)
>> /* base offset of vram pages */
>> if (adev->flags & AMD_IS_APU) {
>> - u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
>> + uint64_t tmp = RREG32(mmMC_VM_FB_OFFSET);
>> tmp <<= 22;
>> adev->vm_manager.vram_base_offset = tmp;
>> } else
>> @@ -642,11 +642,11 @@ static void gmc_v6_0_vm_fini(struct
>> amdgpu_device *adev)
>> }
>> static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
>> - u32 status, u32 addr, u32 mc_client)
>> + uint32_t status, uint32_t addr, uint32_t mc_client)
>> {
>> - u32 mc_id;
>> - u32 vmid = REG_GET_FIELD(status,
>> VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
>> - u32 protections = REG_GET_FIELD(status,
>> VM_CONTEXT1_PROTECTION_FAULT_STATUS,
>> + uint32_t mc_id;
>> + uint32_t vmid = REG_GET_FIELD(status,
>> VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
>> + uint32_t protections = REG_GET_FIELD(status,
>> VM_CONTEXT1_PROTECTION_FAULT_STATUS,
>> PROTECTIONS);
>> char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
>> (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
>> @@ -662,7 +662,7 @@ static void gmc_v6_0_vm_decode_fault(struct
>> amdgpu_device *adev,
>> }
>> /*
>> -static const u32 mc_cg_registers[] = {
>> +static const uint32_t mc_cg_registers[] = {
>> MC_HUB_MISC_HUB_CG,
>> MC_HUB_MISC_SIP_CG,
>> MC_HUB_MISC_VM_CG,
>> @@ -674,7 +674,7 @@ static const u32 mc_cg_registers[] = {
>> VM_L2_CG,
>> };
>> -static const u32 mc_cg_ls_en[] = {
>> +static const uint32_t mc_cg_ls_en[] = {
>> MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
>> MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
>> MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
>> @@ -686,7 +686,7 @@ static const u32 mc_cg_ls_en[] = {
>> VM_L2_CG__MEM_LS_ENABLE_MASK,
>> };
>> -static const u32 mc_cg_en[] = {
>> +static const uint32_t mc_cg_en[] = {
>> MC_HUB_MISC_HUB_CG__ENABLE_MASK,
>> MC_HUB_MISC_SIP_CG__ENABLE_MASK,
>> MC_HUB_MISC_VM_CG__ENABLE_MASK,
>> @@ -702,7 +702,7 @@ static void gmc_v6_0_enable_mc_ls(struct
>> amdgpu_device *adev,
>> bool enable)
>> {
>> int i;
>> - u32 orig, data;
>> + uint32_t orig, data;
>> for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
>> orig = data = RREG32(mc_cg_registers[i]);
>> @@ -719,7 +719,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct
>> amdgpu_device *adev,
>> bool enable)
>> {
>> int i;
>> - u32 orig, data;
>> + uint32_t orig, data;
>> for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
>> orig = data = RREG32(mc_cg_registers[i]);
>> @@ -735,7 +735,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct
>> amdgpu_device *adev,
>> static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
>> bool enable)
>> {
>> - u32 orig, data;
>> + uint32_t orig, data;
>> orig = data = RREG32_PCIE(ixPCIE_CNTL2);
>> @@ -758,7 +758,7 @@ static void gmc_v6_0_enable_bif_mgls(struct
>> amdgpu_device *adev,
>> static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
>> bool enable)
>> {
>> - u32 orig, data;
>> + uint32_t orig, data;
>> orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
>> @@ -774,7 +774,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct
>> amdgpu_device *adev,
>> static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
>> bool enable)
>> {
>> - u32 orig, data;
>> + uint32_t orig, data;
>> orig = data = RREG32(mmHDP_MEM_POWER_LS);
>> @@ -818,7 +818,7 @@ static int gmc_v6_0_early_init(void *handle)
>> if (adev->flags & AMD_IS_APU) {
>> adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
>> } else {
>> - u32 tmp = RREG32(mmMC_SEQ_MISC0);
>> + uint32_t tmp = RREG32(mmMC_SEQ_MISC0);
>> tmp &= MC_SEQ_MISC0__MT__MASK;
>> adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
>> }
>> @@ -972,7 +972,7 @@ static int gmc_v6_0_resume(void *handle)
>> static bool gmc_v6_0_is_idle(void *handle)
>> {
>> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>> - u32 tmp = RREG32(mmSRBM_STATUS);
>> + uint32_t tmp = RREG32(mmSRBM_STATUS);
>> if (tmp & (SRBM_STATUS__MCB_BUSY_MASK |
>> SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
>> SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK |
>> SRBM_STATUS__VMC_BUSY_MASK))
>> @@ -984,7 +984,7 @@ static bool gmc_v6_0_is_idle(void *handle)
>> static int gmc_v6_0_wait_for_idle(void *handle)
>> {
>> unsigned i;
>> - u32 tmp;
>> + uint32_t tmp;
>> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>> for (i = 0; i < adev->usec_timeout; i++) {
>> @@ -1005,8 +1005,8 @@ static int gmc_v6_0_soft_reset(void *handle)
>> {
>> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>> struct amdgpu_mode_mc_save save;
>> - u32 srbm_soft_reset = 0;
>> - u32 tmp = RREG32(mmSRBM_STATUS);
>> + uint32_t srbm_soft_reset = 0;
>> + uint32_t tmp = RREG32(mmSRBM_STATUS);
>> if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
>> srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
>> @@ -1052,8 +1052,8 @@ static int
>> gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
>> unsigned type,
>> enum amdgpu_interrupt_state state)
>> {
>> - u32 tmp;
>> - u32 bits =
>> (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
>> + uint32_t tmp;
>> + uint32_t bits =
>> (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
>>
>> VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
>>
>> VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
>>
>> VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
>> @@ -1088,7 +1088,7 @@ static int gmc_v6_0_process_interrupt(struct
>> amdgpu_device *adev,
>> struct amdgpu_irq_src *source,
>> struct amdgpu_iv_entry *entry)
>> {
>> - u32 addr, status;
>> + uint32_t addr, status;
>> addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
>> status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
>
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
* RE: [PATCH 2/2] drm/amd/amdgpu: Clean up gmc6 wait_for_idle
[not found] ` <20170515182557.10280-2-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
@ 2017-05-23 14:41 ` Deucher, Alexander
0 siblings, 0 replies; 5+ messages in thread
From: Deucher, Alexander @ 2017-05-23 14:41 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: StDenis, Tom
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
> Of Tom St Denis
> Sent: Monday, May 15, 2017 2:26 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: StDenis, Tom
> Subject: [PATCH 2/2] drm/amd/amdgpu: Clean up gmc6 wait_for_idle
>
> Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8 +-------
> 1 file changed, 1 insertion(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index 81f5aa9ff719..27db0710e9ab 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -984,16 +984,10 @@ static bool gmc_v6_0_is_idle(void *handle)
> static int gmc_v6_0_wait_for_idle(void *handle)
> {
> unsigned i;
> - uint32_t tmp;
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>
> for (i = 0; i < adev->usec_timeout; i++) {
> - tmp = RREG32(mmSRBM_STATUS) &
> (SRBM_STATUS__MCB_BUSY_MASK |
> -
> SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
> -
> SRBM_STATUS__MCC_BUSY_MASK |
> -
> SRBM_STATUS__MCD_BUSY_MASK |
> -
> SRBM_STATUS__VMC_BUSY_MASK);
> - if (!tmp)
> + if (gmc_v6_0_is_idle(handle))
> return 0;
> udelay(1);
> }
> --
> 2.12.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2017-05-23 14:41 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-15 18:25 [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6 Tom St Denis
[not found] ` <20170515182557.10280-1-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2017-05-15 18:25 ` [PATCH 2/2] drm/amd/amdgpu: Clean up gmc6 wait_for_idle Tom St Denis
[not found] ` <20170515182557.10280-2-tom.stdenis-5C7GfCeVMHo@public.gmane.org>
2017-05-23 14:41 ` Deucher, Alexander
2017-05-16 10:51 ` [PATCH 1/2] drm/amd/amdgpu: Use 32/64-bit types in gmc6 Christian König
[not found] ` <2f57261b-d94a-9f2d-979d-74277bf29d64-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-05-16 10:54 ` Tom St Denis
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.