* [PATCH] drm/amdgpu: add atpx quirk handling
@ 2019-08-21 22:20 Alex Deucher
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
0 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2019-08-21 22:20 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
Add quirks for handling PX/HG systems. In this case, add
a quirk for a weston dGPU that only seems to properly power
down using ATPX power control rather than HG (_PR3).
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 56 +++++++++++++++++++++---
1 file changed, 49 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..e85b9fabad38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -14,6 +14,16 @@
#include "amd_acpi.h"
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
bool bridge_pm_usable;
+ unsigned int quirks;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
- /*
- * Disable legacy PM methods only when pcie port PM is usable,
- * otherwise the device might fail to power off or power on.
- */
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+ printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+ atpx->functions.power_cntl = true;
+ atpx->is_hybrid = false;
+ } else {
+ printk("ATPX Hybrid Graphics\n");
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
}
atpx->dgpu_req_power_for_displays = false;
@@ -547,6 +564,29 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.get_client_id = amdgpu_atpx_get_client_id,
};
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+ const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device) {
+ amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+}
+
/**
* amdgpu_atpx_detect - detect whether we have PX
*
@@ -570,6 +610,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
@@ -579,6 +620,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
if (has_atpx && vga_count == 2) {
--
2.13.6
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 1/2] drm/radeon: use POLL_REG_MEM special op for sDMA HDP flush
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
@ 2019-08-21 22:20 ` Alex Deucher
2019-08-21 22:20 ` [PATCH] XXX: hack: disable HG and use PX Alex Deucher
` (2 subsequent siblings)
3 siblings, 0 replies; 8+ messages in thread
From: Alex Deucher @ 2019-08-21 22:20 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
Flush via the ring works differently on CIK and requires a
special sequence.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/radeon/cik_sdma.c | 36 ++++++++++++++++++++++++------------
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727..00984d8 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -87,6 +87,28 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
}
+static void cik_sdma_hdp_flush(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+}
+
/**
* cik_sdma_fence_ring_emit - emit a fence on the DMA ring
*
@@ -111,12 +133,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush(rdev, fence->ring);
}
/**
@@ -747,12 +764,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush(rdev, ridx);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
--
1.8.3.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH] XXX: hack: disable HG and use PX
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
2019-08-21 22:20 ` [PATCH 1/2] drm/radeon: use POLL_REG_MEM special op for sDMA HDP flush Alex Deucher
@ 2019-08-21 22:20 ` Alex Deucher
2019-08-21 22:20 ` [PATCH] XXX: hack: use atpx rather than HG for runpm v2 Alex Deucher
2019-08-21 22:20 ` [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush Alex Deucher
3 siblings, 0 replies; 8+ messages in thread
From: Alex Deucher @ 2019-08-21 22:20 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..9d9004afc81b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -205,13 +205,13 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
+// printk("ATPX Hybrid Graphics\n");
/*
* Disable legacy PM methods only when pcie port PM is usable,
* otherwise the device might fail to power off or power on.
*/
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+// atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+// atpx->is_hybrid = true;
}
atpx->dgpu_req_power_for_displays = false;
--
2.13.6
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH] XXX: hack: use atpx rather than HG for runpm v2
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
2019-08-21 22:20 ` [PATCH 1/2] drm/radeon: use POLL_REG_MEM special op for sDMA HDP flush Alex Deucher
2019-08-21 22:20 ` [PATCH] XXX: hack: disable HG and use PX Alex Deucher
@ 2019-08-21 22:20 ` Alex Deucher
2019-08-21 22:20 ` [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush Alex Deucher
3 siblings, 0 replies; 8+ messages in thread
From: Alex Deucher @ 2019-08-21 22:20 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
force power control even if ATPX claims to not support it.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..1816f8189e87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -210,8 +210,8 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
* Disable legacy PM methods only when pcie port PM is usable,
* otherwise the device might fail to power off or power on.
*/
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ atpx->functions.power_cntl = true;//!amdgpu_atpx_priv.bridge_pm_usable;
+// atpx->is_hybrid = true;
}
atpx->dgpu_req_power_for_displays = false;
--
2.13.6
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
` (2 preceding siblings ...)
2019-08-21 22:20 ` [PATCH] XXX: hack: use atpx rather than HG for runpm v2 Alex Deucher
@ 2019-08-21 22:20 ` Alex Deucher
[not found] ` <20190821222034.13387-5-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
3 siblings, 1 reply; 8+ messages in thread
From: Alex Deucher @ 2019-08-21 22:20 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
Flush via the ring works differently on CIK and requires a
special sequence.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/radeon/cik.c | 73 +++++++++++++++++++++++++++-----------------
1 file changed, 45 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0847367..03dd075 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+static void cik_gfx_hdp_flush(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 ref_and_mask;
+
+ switch (ring->idx) {
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ switch (ring->me) {
+ case 0:
+ ref_and_mask = CP2 << ring->pipe;
+ break;
+ case 1:
+ ref_and_mask = CP6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ break;
+ case RADEON_RING_TYPE_GFX_INDEX:
+ ref_and_mask = CP0;
+ break;
+ default:
+ return;
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
+ PACKET3_SET_UCONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, 0xfff);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
+ WAIT_REG_MEM_FUNCTION(3))); /* == */
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, 0xa); /* poll interval */
+}
+
/**
* cik_fence_gfx_ring_emit - emit a fence on the gfx ring
*
@@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, fence->ring);
}
/**
@@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- /* We should be using the WAIT_REG_MEM packet here like in
- * cik_fence_ring_emit(), but it causes the CP to hang in this
- * context...
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, ridx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
--
1.8.3.1
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
[not found] ` <20190821222034.13387-5-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
@ 2019-08-21 22:23 ` Deucher, Alexander
2019-08-22 9:32 ` Christian König
1 sibling, 0 replies; 8+ messages in thread
From: Deucher, Alexander @ 2019-08-21 22:23 UTC (permalink / raw)
To: Alex Deucher, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
[-- Attachment #1.1: Type: text/plain, Size: 5047 bytes --]
Ignore those. wrong directory.
Alex
________________________________
From: Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Sent: Wednesday, August 21, 2019 6:20 PM
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org <amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>
Cc: Deucher, Alexander <Alexander.Deucher-5C7GfCeVMHo@public.gmane.org>
Subject: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
Flush via the ring works differently on CIK and requires a
special sequence.
Signed-off-by: Alex Deucher <alexander.deucher-5C7GfCeVMHo@public.gmane.org>
---
drivers/gpu/drm/radeon/cik.c | 73 +++++++++++++++++++++++++++-----------------
1 file changed, 45 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0847367..03dd075 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+static void cik_gfx_hdp_flush(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 ref_and_mask;
+
+ switch (ring->idx) {
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ switch (ring->me) {
+ case 0:
+ ref_and_mask = CP2 << ring->pipe;
+ break;
+ case 1:
+ ref_and_mask = CP6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ break;
+ case RADEON_RING_TYPE_GFX_INDEX:
+ ref_and_mask = CP0;
+ break;
+ default:
+ return;
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
+ PACKET3_SET_UCONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, 0xfff);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
+ WAIT_REG_MEM_FUNCTION(3))); /* == */
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, 0xa); /* poll interval */
+}
+
/**
* cik_fence_gfx_ring_emit - emit a fence on the gfx ring
*
@@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, fence->ring);
}
/**
@@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- /* We should be using the WAIT_REG_MEM packet here like in
- * cik_fence_ring_emit(), but it causes the CP to hang in this
- * context...
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_gfx_hdp_flush(rdev, ridx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
--
1.8.3.1
[-- Attachment #1.2: Type: text/html, Size: 10914 bytes --]
[-- Attachment #2: Type: text/plain, Size: 153 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
[not found] ` <20190821222034.13387-5-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
2019-08-21 22:23 ` Deucher, Alexander
@ 2019-08-22 9:32 ` Christian König
[not found] ` <217a929a-543f-7aa4-4494-3569f9998a97-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
1 sibling, 1 reply; 8+ messages in thread
From: Christian König @ 2019-08-22 9:32 UTC (permalink / raw)
To: Alex Deucher, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Alex Deucher
Every time we actually tried this it just ended in users reporting CP hangs.
Christian.
Am 22.08.19 um 00:20 schrieb Alex Deucher:
> Flush via the ring works differently on CIK and requires a
> special sequence.
>
> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> ---
> drivers/gpu/drm/radeon/cik.c | 73 +++++++++++++++++++++++++++-----------------
> 1 file changed, 45 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
> index 0847367..03dd075 100644
> --- a/drivers/gpu/drm/radeon/cik.c
> +++ b/drivers/gpu/drm/radeon/cik.c
> @@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
> return r;
> }
>
> +static void cik_gfx_hdp_flush(struct radeon_device *rdev,
> + int ridx)
> +{
> + struct radeon_ring *ring = &rdev->ring[ridx];
> + u32 ref_and_mask;
> +
> + switch (ring->idx) {
> + case CAYMAN_RING_TYPE_CP1_INDEX:
> + case CAYMAN_RING_TYPE_CP2_INDEX:
> + switch (ring->me) {
> + case 0:
> + ref_and_mask = CP2 << ring->pipe;
> + break;
> + case 1:
> + ref_and_mask = CP6 << ring->pipe;
> + break;
> + default:
> + return;
> + }
> + break;
> + case RADEON_RING_TYPE_GFX_INDEX:
> + ref_and_mask = CP0;
> + break;
> + default:
> + return;
> + }
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
> + radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
> + PACKET3_SET_UCONFIG_REG_START) >> 2));
> + radeon_ring_write(ring, 0xfff);
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
> + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
> + WAIT_REG_MEM_FUNCTION(3))); /* == */
> + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
> + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, 0xa); /* poll interval */
> +}
> +
> /**
> * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
> *
> @@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
> radeon_ring_write(ring, fence->seq);
> radeon_ring_write(ring, 0);
> /* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> - * but it causes the CP to hang
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
> }
>
> /**
> @@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
> radeon_ring_write(ring, fence->seq);
> radeon_ring_write(ring, 0);
> /* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> - * but it causes the CP to hang
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
> }
>
> bool cik_semaphore_ring_emit(struct radeon_device *rdev,
> @@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
> radeon_ring_write(ring, VMID(0));
>
> /* HDP flush */
> - /* We should be using the WAIT_REG_MEM packet here like in
> - * cik_fence_ring_emit(), but it causes the CP to hang in this
> - * context...
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, ridx);
>
> /* bits 0-15 are the VM contexts0-15 */
> radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
[not found] ` <217a929a-543f-7aa4-4494-3569f9998a97-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-08-22 12:47 ` Deucher, Alexander
0 siblings, 0 replies; 8+ messages in thread
From: Deucher, Alexander @ 2019-08-22 12:47 UTC (permalink / raw)
To: Alex Deucher, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Koenig,
Christian
[-- Attachment #1.1: Type: text/plain, Size: 5363 bytes --]
This was an old patch I accidentally sent out. Please ignore.
Alex
________________________________
From: Christian König <ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Sent: Thursday, August 22, 2019 5:32 AM
To: Alex Deucher <alexdeucher-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>; amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org <amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>
Cc: Deucher, Alexander <Alexander.Deucher-5C7GfCeVMHo@public.gmane.org>
Subject: Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush
Every time we actually tried this it just ended in users reporting CP hangs.
Christian.
Am 22.08.19 um 00:20 schrieb Alex Deucher:
> Flush via the ring works differently on CIK and requires a
> special sequence.
>
> Signed-off-by: Alex Deucher <alexander.deucher-5C7GfCeVMHo@public.gmane.org>
> ---
> drivers/gpu/drm/radeon/cik.c | 73 +++++++++++++++++++++++++++-----------------
> 1 file changed, 45 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
> index 0847367..03dd075 100644
> --- a/drivers/gpu/drm/radeon/cik.c
> +++ b/drivers/gpu/drm/radeon/cik.c
> @@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
> return r;
> }
>
> +static void cik_gfx_hdp_flush(struct radeon_device *rdev,
> + int ridx)
> +{
> + struct radeon_ring *ring = &rdev->ring[ridx];
> + u32 ref_and_mask;
> +
> + switch (ring->idx) {
> + case CAYMAN_RING_TYPE_CP1_INDEX:
> + case CAYMAN_RING_TYPE_CP2_INDEX:
> + switch (ring->me) {
> + case 0:
> + ref_and_mask = CP2 << ring->pipe;
> + break;
> + case 1:
> + ref_and_mask = CP6 << ring->pipe;
> + break;
> + default:
> + return;
> + }
> + break;
> + case RADEON_RING_TYPE_GFX_INDEX:
> + ref_and_mask = CP0;
> + break;
> + default:
> + return;
> + }
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
> + radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
> + PACKET3_SET_UCONFIG_REG_START) >> 2));
> + radeon_ring_write(ring, 0xfff);
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
> + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
> + WAIT_REG_MEM_FUNCTION(3))); /* == */
> + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
> + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, 0xa); /* poll interval */
> +}
> +
> /**
> * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
> *
> @@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
> radeon_ring_write(ring, fence->seq);
> radeon_ring_write(ring, 0);
> /* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> - * but it causes the CP to hang
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
> }
>
> /**
> @@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
> radeon_ring_write(ring, fence->seq);
> radeon_ring_write(ring, 0);
> /* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> - * but it causes the CP to hang
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
> }
>
> bool cik_semaphore_ring_emit(struct radeon_device *rdev,
> @@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
> radeon_ring_write(ring, VMID(0));
>
> /* HDP flush */
> - /* We should be using the WAIT_REG_MEM packet here like in
> - * cik_fence_ring_emit(), but it causes the CP to hang in this
> - * context...
> - */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> - WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, ridx);
>
> /* bits 0-15 are the VM contexts0-15 */
> radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
[-- Attachment #1.2: Type: text/html, Size: 10970 bytes --]
[-- Attachment #2: Type: text/plain, Size: 153 bytes --]
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2019-08-22 12:47 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-21 22:20 [PATCH] drm/amdgpu: add atpx quirk handling Alex Deucher
[not found] ` <20190821222034.13387-1-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
2019-08-21 22:20 ` [PATCH 1/2] drm/radeon: use POLL_REG_MEM special op for sDMA HDP flush Alex Deucher
2019-08-21 22:20 ` [PATCH] XXX: hack: disable HG and use PX Alex Deucher
2019-08-21 22:20 ` [PATCH] XXX: hack: use atpx rather than HG for runpm v2 Alex Deucher
2019-08-21 22:20 ` [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush Alex Deucher
[not found] ` <20190821222034.13387-5-alexander.deucher-5C7GfCeVMHo@public.gmane.org>
2019-08-21 22:23 ` Deucher, Alexander
2019-08-22 9:32 ` Christian König
[not found] ` <217a929a-543f-7aa4-4494-3569f9998a97-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-08-22 12:47 ` Deucher, Alexander
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.