* [PATCH 1/5] drm/amdgpu: Add gfx v12 pte/pde format change
From: Alex Deucher @ 2024-04-25 19:58 UTC
  To: amd-gfx; +Cc: Hawking Zhang, Likun Gao, Alex Deucher

From: Hawking Zhang <Hawking.Zhang@amd.com>

Add gfx v12 pte/pde format change.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 54d7da396de0..e0e7e944a323 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -111,6 +111,19 @@ struct amdgpu_mem_stats;
 #define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
 #define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)
 
+/* gfx12 */
+#define AMDGPU_PTE_PRT_GFX12		(1ULL << 56)
+
+#define AMDGPU_PTE_MTYPE_GFX12(a)	((uint64_t)(a) << 54)
+#define AMDGPU_PTE_MTYPE_GFX12_MASK	AMDGPU_PTE_MTYPE_GFX12(3ULL)
+
+#define AMDGPU_PTE_IS_PTE		(1ULL << 63)
+
+/* PDE Block Fragment Size for gfx v12 */
+#define AMDGPU_PDE_BFS_GFX12(a)		((uint64_t)((a) & 0x1fULL) << 58)
+/* PDE is handled as PTE for gfx v12 */
+#define AMDGPU_PDE_PTE_GFX12		(1ULL << 63)
+
 /* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER	0
 #define AMDGPU_VM_FAULT_STOP_FIRST	1
-- 
2.44.0
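
For orientation, a minimal sketch (illustration only, not part of the patch)
of how the new gfx12 macros compose with the pre-existing PTE bits from
amdgpu_vm.h; MTYPE_UC is assumed to be the soc24 memory-type enum value:

    /* Sketch: flags for a valid, executable, uncached gfx12 PTE. */
    uint64_t pte_flags = AMDGPU_PTE_VALID |
                         AMDGPU_PTE_EXECUTABLE |
                         AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC) | /* bits 55:54 */
                         AMDGPU_PTE_IS_PTE;                 /* bit 63 */

    /* A PDE instead leaves the P bit (63) clear -- unless it is handled
     * as a PTE via AMDGPU_PDE_PTE_GFX12 -- and carries a block fragment
     * size; 9 means 4K << 9 = 2MB fragments.
     */
    uint64_t pde_flags = AMDGPU_PDE_BFS_GFX12(9);           /* bits 62:58 */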



* [PATCH 2/5] drm/amdgpu: Add gmc v12_0 ip block support (v7)
From: Alex Deucher @ 2024-04-25 19:58 UTC
  To: amd-gfx; +Cc: Hawking Zhang, Likun Gao, Alex Deucher

From: Hawking Zhang <Hawking.Zhang@amd.com>

Add initial support for GMC v12.

v1: Add gmc v12_0 ip block support.
v2: Switch to gfx.kiq array.
v3: Switch to vmhubs_mask.
v4: Switch to AMDGPU_MMHUB0(0) and AMDGPU_GFXHUB(0)
v5: Rebase (Alex)
v6: Squash in fixes for AGP handling, gfxhub init order,
    vmhub index (Alex)
v7: Rebase (Alex)
v8: Squash in ECC fix (Alex)

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile    |    2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 1000 ++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h |   30 +
 3 files changed, 1031 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 5c0e7b512e25..9a793f4d8fcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -116,7 +116,7 @@ amdgpu-y += \
 	gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
 	mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
 	mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o mmhub_v3_3.o \
-	gfxhub_v11_5_0.o mmhub_v4_1_0.o gfxhub_v12_0.o
+	gfxhub_v11_5_0.o mmhub_v4_1_0.o gfxhub_v12_0.o gmc_v12_0.o
 
 # add UMC block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
new file mode 100644
index 000000000000..c85ebc8360e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+
+#include <drm/drm_cache.h>
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "gmc_v12_0.h"
+#include "athub/athub_4_1_0_sh_mask.h"
+#include "athub/athub_4_1_0_offset.h"
+#include "oss/osssys_7_0_0_offset.h"
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+#include "soc24_enum.h"
+#include "soc24.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+#include "nbif_v6_3_1.h"
+#include "gfxhub_v12_0.h"
+#include "mmhub_v4_1_0.h"
+#include "athub_v4_1_0.h"
+
+
+static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *src,
+					 unsigned type,
+					 enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *src, unsigned type,
+					      enum amdgpu_interrupt_state state)
+{
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		/* MM HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
+		/* GFX HUB */
+		/* This works because this interrupt is only
+		 * enabled at init/resume and disabled in
+		 * fini/suspend, so the overall state doesn't
+		 * change over the course of suspend/resume.
+		 */
+		if (!adev->in_s0ix)
+			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		/* MM HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
+		/* GFX HUB */
+		/* This works because this interrupt is only
+		 * enabled at init/resume and disabled in
+		 * fini/suspend, so the overall state doesn't
+		 * change over the course of suspend/resume.
+		 */
+		if (!adev->in_s0ix)
+			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *source,
+				       struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_vmhub *hub;
+	uint32_t status = 0;
+	u64 addr;
+
+	addr = (u64)entry->src_data[0] << 12;
+	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
+		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
+	else
+		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+
+	if (!amdgpu_sriov_vf(adev)) {
+		/*
+		 * Issue a dummy read to wait for the status register to
+		 * be updated to avoid reading an incorrect value due to
+		 * the new fast GRBM interface.
+		 */
+		if (entry->vmid_src == AMDGPU_GFXHUB(0))
+			RREG32(hub->vm_l2_pro_fault_status);
+
+		status = RREG32(hub->vm_l2_pro_fault_status);
+		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
+					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
+	}
+
+	if (printk_ratelimit()) {
+		struct amdgpu_task_info *task_info;
+
+		dev_err(adev->dev,
+			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+			entry->vmid_src ? "mmhub" : "gfxhub",
+			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
+		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+		if (task_info) {
+			dev_err(adev->dev,
+				" in process %s pid %d thread %s pid %d\n",
+				task_info->process_name, task_info->tgid,
+				task_info->task_name, task_info->pid);
+			amdgpu_vm_put_task_info(task_info);
+		}
+
+		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
+				addr, entry->client_id);
+
+		if (!amdgpu_sriov_vf(adev))
+			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
+	}
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
+	.set = gmc_v12_0_vm_fault_interrupt_state,
+	.process = gmc_v12_0_process_interrupt,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
+	.set = gmc_v12_0_ecc_interrupt_state,
+	.process = amdgpu_umc_process_ecc_irq,
+};
+
+static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;
+
+	if (!amdgpu_sriov_vf(adev)) {
+		adev->gmc.ecc_irq.num_types = 1;
+		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
+	}
+}
+
+/**
+ * gmc_v12_0_use_invalidate_semaphore - judge whether to use the
+ * invalidation semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ * Returns true when the invalidation semaphore should be used for @vmhub.
+ */
+static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+				       uint32_t vmhub)
+{
+	return ((vmhub == AMDGPU_MMHUB0(0)) &&
+		(!amdgpu_sriov_vf(adev)));
+}
+
+static bool gmc_v12_0_get_vmid_pasid_mapping_info(
+					struct amdgpu_device *adev,
+					uint8_t vmid, uint16_t *p_pasid)
+{
+	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
+
+	return !!(*p_pasid);
+}
+
+/*
+ * GART
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+
+static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
+				   unsigned int vmhub, uint32_t flush_type)
+{
+	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
+	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
+	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
+	u32 tmp;
+	/* Use register 17 for GART */
+	const unsigned eng = 17;
+	unsigned int i;
+	unsigned char hub_ip = 0;
+
+	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
+		   GC_HWIP : MMHUB_HWIP;
+
+	spin_lock(&adev->gmc.invalidate_lock);
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. As a workaround, acquire a semaphore
+	 * before the invalidation and release it afterwards so that the
+	 * hub does not enter a power-gated state in between.
+	 */
+
+	/* TODO: semaphore handling for the GFXHUB is still being debugged */
+	if (use_semaphore) {
+		for (i = 0; i < adev->usec_timeout; i++) {
+			/* a read return value of 1 means semaphore acquire */
+			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+					    hub->eng_distance * eng, hub_ip);
+			if (tmp & 0x1)
+				break;
+			udelay(1);
+		}
+
+		if (i >= adev->usec_timeout)
+			dev_err(adev->dev,
+				"Timeout waiting for sem acquire in VM flush!\n");
+	}
+
+	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
+
+	/* Wait for the invalidation ACK, polling with a delay. */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+				    hub->eng_distance * eng, hub_ip);
+		tmp &= 1 << vmid;
+		if (tmp)
+			break;
+
+		udelay(1);
+	}
+
+	/* TODO: semaphore handling for the GFXHUB is still being debugged */
+	if (use_semaphore)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+			      hub->eng_distance * eng, 0, hub_ip);
+
+	/* Issue additional private vm invalidation to MMHUB */
+	if ((vmhub != AMDGPU_GFXHUB(0)) &&
+	    (hub->vm_l2_bank_select_reserved_cid2) &&
+		!amdgpu_sriov_vf(adev)) {
+		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
+		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
+		inv_req |= (1 << 25);
+		/* Issue private invalidation */
+		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
+		/* Read back to ensure the invalidation has completed */
+		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
+	}
+
+	spin_unlock(&adev->gmc.invalidate_lock);
+
+	if (i < adev->usec_timeout)
+		return;
+
+	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
+}
+
+/**
+ * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
+ *
+ * @adev: amdgpu_device pointer
+ * @vmid: vm instance to flush
+ * @vmhub: which hub to flush
+ * @flush_type: the flush type
+ *
+ * Flush the TLB for the requested page table.
+ */
+static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+					uint32_t vmhub, uint32_t flush_type)
+{
+	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
+		return;
+
+	/* flush hdp cache */
+	adev->hdp.funcs->flush_hdp(adev, NULL);
+
+	/* This is necessary for SRIOV as well as for GFXOFF to function
+	 * properly under bare metal
+	 */
+	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
+		const unsigned eng = 17;
+		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
+		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
+		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
+
+		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+				1 << vmid, GET_INST(GC, 0));
+		return;
+	}
+
+	mutex_lock(&adev->mman.gtt_window_lock);
+	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
+	mutex_unlock(&adev->mman.gtt_window_lock);
+}
+
+/**
+ * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ * @flush_type: the flush type
+ * @all_hub: flush all hubs
+ * @inst: instance of the hub to flush
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					  uint16_t pasid, uint32_t flush_type,
+					  bool all_hub, uint32_t inst)
+{
+	uint16_t queried;
+	int vmid, i;
+
+	for (vmid = 1; vmid < 16; vmid++) {
+		bool valid;
+
+		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
+							      &queried);
+		if (!valid || queried != pasid)
+			continue;
+
+		if (all_hub) {
+			for_each_set_bit(i, adev->vmhubs_mask,
+					 AMDGPU_MAX_VMHUBS)
+				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
+							flush_type);
+		} else {
+			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
+						flush_type);
+		}
+	}
+}
+
+static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					     unsigned vmid, uint64_t pd_addr)
+{
+	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
+	unsigned eng = ring->vm_inv_eng;
+
+	/*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. As a workaround, acquire a semaphore
+	 * before the invalidation and release it afterwards so that the
+	 * hub does not enter a power-gated state in between.
+	 */
+
+	/* TODO: semaphore handling for the GFXHUB is still being debugged */
+	if (use_semaphore)
+		/* a read return value of 1 means semaphore acquire */
+		amdgpu_ring_emit_reg_wait(ring,
+					  hub->vm_inv_eng0_sem +
+					  hub->eng_distance * eng, 0x1, 0x1);
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+			      (hub->ctx_addr_distance * vmid),
+			      lower_32_bits(pd_addr));
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+			      (hub->ctx_addr_distance * vmid),
+			      upper_32_bits(pd_addr));
+
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
+					    hub->eng_distance * eng,
+					    hub->vm_inv_eng0_ack +
+					    hub->eng_distance * eng,
+					    req, 1 << vmid);
+
+	/* TODO: semaphore handling for the GFXHUB is still being debugged */
+	if (use_semaphore)
+		/*
+		 * Release the semaphore after the invalidation;
+		 * writing 0 releases it.
+		 */
+		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
+				      hub->eng_distance * eng, 0);
+
+	return pd_addr;
+}
+
+static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+					 unsigned pasid)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t reg;
+
+	/* MES fw manages IH_VMID_x_LUT updating */
+	if (ring->is_mes_queue)
+		return;
+
+	if (ring->vm_hub == AMDGPU_GFXHUB(0))
+		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
+	else
+		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
+
+	amdgpu_ring_emit_wreg(ring, reg, pasid);
+}
+
+/*
+ * PTE format:
+ * 63 P
+ * 62:59 reserved
+ * 58 D
+ * 57 G
+ * 56 T
+ * 55:54 M
+ * 53:52 SW
+ * 51:48 reserved for future
+ * 47:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 Z
+ * 2 snooped
+ * 1 system
+ * 0 valid
+ *
+ * PDE format:
+ * 63 P
+ * 62:58 block fragment size
+ * 57 reserved
+ * 56 A
+ * 55:54 M
+ * 53:52 reserved
+ * 51:48 reserved for future
+ * 47:6 physical base address of PD or PTE
+ * 5:3 reserved
+ * 2 C
+ * 1 system
+ * 0 valid
+ */
+
+static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+	switch (flags) {
+	case AMDGPU_VM_MTYPE_DEFAULT:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_NC);
+	case AMDGPU_VM_MTYPE_NC:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_NC);
+	case AMDGPU_VM_MTYPE_WC:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_WC);
+	case AMDGPU_VM_MTYPE_CC:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_CC);
+	case AMDGPU_VM_MTYPE_UC:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC);
+	default:
+		return AMDGPU_PTE_MTYPE_GFX12(MTYPE_NC);
+	}
+}
+
+static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
+				 uint64_t *addr, uint64_t *flags)
+{
+	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
+		*addr = adev->vm_manager.vram_base_offset + *addr -
+			adev->gmc.vram_start;
+	BUG_ON(*addr & 0xFFFF00000000003FULL);
+
+	if (!adev->gmc.translate_further)
+		return;
+
+	if (level == AMDGPU_VM_PDB1) {
+		/* Set the block fragment size */
+		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
+			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);
+
+	} else if (level == AMDGPU_VM_PDB0) {
+		if (*flags & AMDGPU_PDE_PTE_GFX12)
+			*flags &= ~AMDGPU_PDE_PTE_GFX12;
+	}
+}
+
+static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
+				 struct amdgpu_bo_va_mapping *mapping,
+				 uint64_t *flags)
+{
+	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
+	*flags &= ~AMDGPU_PTE_EXECUTABLE;
+	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+	*flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
+	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+
+	if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+		*flags |= AMDGPU_PTE_PRT_GFX12;
+		*flags |= AMDGPU_PTE_SNOOPED;
+		*flags |= AMDGPU_PTE_SYSTEM;
+		*flags &= ~AMDGPU_PTE_VALID;
+	}
+
+	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+			       AMDGPU_GEM_CREATE_UNCACHED))
+		*flags = (*flags & ~AMDGPU_PTE_MTYPE_GFX12_MASK) |
+			 AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC);
+}
+
+static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
+	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
+	.map_mtype = gmc_v12_0_map_mtype,
+	.get_vm_pde = gmc_v12_0_get_vm_pde,
+	.get_vm_pte = gmc_v12_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
+};
+
+static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
+{
+	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
+}
+
+static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
+{
+}
+
+
+static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+	case IP_VERSION(4, 1, 0):
+		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
+		break;
+	default:
+		break;
+	}
+}
+
+static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
+		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
+		break;
+	default:
+		break;
+	}
+}
+
+static int gmc_v12_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v12_0_set_gfxhub_funcs(adev);
+	gmc_v12_0_set_mmhub_funcs(adev);
+	gmc_v12_0_set_gmc_funcs(adev);
+	gmc_v12_0_set_irq_funcs(adev);
+	gmc_v12_0_set_umc_funcs(adev);
+
+	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->gmc.shared_aperture_end =
+		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
+	adev->gmc.private_aperture_end =
+		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+
+	return 0;
+}
+
+static int gmc_v12_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_gmc_ras_late_init(adev);
+	if (r)
+		return r;
+
+	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
+}
+
+static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
+					struct amdgpu_gmc *mc)
+{
+	u64 base = 0;
+
+	base = adev->mmhub.funcs->get_fb_location(adev);
+
+	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
+	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
+		amdgpu_gmc_agp_location(adev, mc);
+
+	/* base offset of vram pages */
+	if (amdgpu_sriov_vf(adev))
+		adev->vm_manager.vram_base_offset = 0;
+	else
+		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
+}
+
+/**
+ * gmc_v12_0_mc_init - initialize the memory controller driver params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the amount of vram, vram width, and decide how to place
+ * vram and gart within the GPU's physical address space.
+ * Returns 0 for success.
+ */
+static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* get_memsize() returns the VRAM size in MB */
+	adev->gmc.mc_vram_size =
+		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
+
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_device_resize_fb_bar(adev);
+		if (r)
+			return r;
+	}
+
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+
+#ifdef CONFIG_X86_64
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
+	}
+#endif
+	/* In case the PCI BAR is larger than the actual amount of vram */
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
+
+	/* set the gart size */
+	if (amdgpu_gart_size == -1)
+		adev->gmc.gart_size = 512ULL << 20;
+	else
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
+
+	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);
+
+	return 0;
+}
+
+static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.bo) {
+		WARN(1, "PCIE GART already initialized\n");
+		return 0;
+	}
+
+	/* Initialize common gart structure */
+	r = amdgpu_gart_init(adev);
+	if (r)
+		return r;
+
+	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC) |
+				 AMDGPU_PTE_EXECUTABLE;
+
+	return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static int gmc_v12_0_sw_init(void *handle)
+{
+	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mmhub.funcs->init(adev);
+
+	adev->gfxhub.funcs->init(adev);
+
+	spin_lock_init(&adev->gmc.invalidate_lock);
+
+	r = amdgpu_atomfirmware_get_vram_info(adev,
+					      &vram_width, &vram_type, &vram_vendor);
+	adev->gmc.vram_width = vram_width;
+
+	adev->gmc.vram_type = vram_type;
+	adev->gmc.vram_vendor = vram_vendor;
+
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
+		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
+		/*
+		 * To fulfill 4-level page support,
+		 * vm size is 256TB (48bit), maximum size,
+		 * block size 512 (9bit)
+		 */
+		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+		break;
+	default:
+		break;
+	}
+
+	/* This is the VMC page fault interrupt. */
+	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
+			      VMC_1_0__SRCID__VM_FAULT,
+			      &adev->gmc.vm_fault);
+
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+			      UTCL2_1_0__SRCID__FAULT,
+			      &adev->gmc.vm_fault);
+	if (r)
+		return r;
+
+	if (!amdgpu_sriov_vf(adev)) {
+		/* interrupt sent to DF. */
+		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
+				      &adev->gmc.ecc_irq);
+		if (r)
+			return r;
+	}
+
+	/*
+	 * Set the internal MC address mask. This is the maximum address of
+	 * the GPU's internal address space.
+	 */
+	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(48));
+	if (r) {
+		dev_warn(adev->dev, "No suitable DMA available.\n");
+		return r;
+	}
+
+	adev->need_swiotlb = drm_need_swiotlb(44);
+
+	r = gmc_v12_0_mc_init(adev);
+	if (r)
+		return r;
+
+	amdgpu_gmc_get_vbios_allocations(adev);
+
+	/* Memory manager */
+	r = amdgpu_bo_init(adev);
+	if (r)
+		return r;
+
+	r = gmc_v12_0_gart_init(adev);
+	if (r)
+		return r;
+
+	/*
+	 * number of VMs
+	 * VMID 0 is reserved for System
+	 * amdgpu graphics/compute will use VMIDs 1-7
+	 * amdkfd will use VMIDs 8-15
+	 */
+	adev->vm_manager.first_kfd_vmid = 8;
+
+	amdgpu_vm_manager_init(adev);
+
+	return 0;
+}
+
+/**
+ * gmc_v12_0_gart_fini - vm fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tears down the driver GART/VM setup.
+ */
+static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
+{
+	amdgpu_gart_table_vram_free(adev);
+}
+
+static int gmc_v12_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_vm_manager_fini(adev);
+	gmc_v12_0_gart_fini(adev);
+	amdgpu_gem_force_release(adev);
+	amdgpu_bo_fini(adev);
+
+	return 0;
+}
+
+static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
+{
+}
+
+/**
+ * gmc_v12_0_gart_enable - gart enable
+ *
+ * @adev: amdgpu_device pointer
+ */
+static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
+{
+	int r;
+	bool value;
+
+	if (!adev->gart.bo) {
+		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+
+	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
+
+	r = adev->mmhub.funcs->gart_enable(adev);
+	if (r)
+		return r;
+
+	/* Flush HDP after it is initialized */
+	adev->hdp.funcs->flush_hdp(adev, NULL);
+
+	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
+
+	adev->mmhub.funcs->set_fault_enable_default(adev, value);
+	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
+
+	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+
+	return 0;
+}
+
+static int gmc_v12_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* The sequence of these two function calls matters. */
+	gmc_v12_0_init_golden_registers(adev);
+
+	r = gmc_v12_0_gart_enable(adev);
+	if (r)
+		return r;
+
+	if (adev->umc.funcs && adev->umc.funcs->init_registers)
+		adev->umc.funcs->init_registers(adev);
+
+	return 0;
+}
+
+/**
+ * gmc_v12_0_gart_disable - gart disable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This disables all VM page tables.
+ */
+static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
+{
+	adev->mmhub.funcs->gart_disable(adev);
+}
+
+static int gmc_v12_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (amdgpu_sriov_vf(adev)) {
+		/* full access mode, so don't touch any GMC register */
+		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+		return 0;
+	}
+
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
+	if (adev->gmc.ecc_irq.funcs &&
+		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
+	gmc_v12_0_gart_disable(adev);
+
+	return 0;
+}
+
+static int gmc_v12_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v12_0_hw_fini(adev);
+
+	return 0;
+}
+
+static int gmc_v12_0_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = gmc_v12_0_hw_init(adev);
+	if (r)
+		return r;
+
+	amdgpu_vmid_reset_all(adev);
+
+	return 0;
+}
+
+static bool gmc_v12_0_is_idle(void *handle)
+{
+	/* MC is always ready in GMC v12. */
+	return true;
+}
+
+static int gmc_v12_0_wait_for_idle(void *handle)
+{
+	/* There is no need to wait for MC idle in GMC v12. */
+	return 0;
+}
+
+static int gmc_v12_0_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int gmc_v12_0_set_clockgating_state(void *handle,
+					   enum amd_clockgating_state state)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = adev->mmhub.funcs->set_clockgating(adev, state);
+	if (r)
+		return r;
+
+	return athub_v4_1_0_set_clockgating(adev, state);
+}
+
+static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mmhub.funcs->get_clockgating(adev, flags);
+
+	athub_v4_1_0_get_clockgating(adev, flags);
+}
+
+static int gmc_v12_0_set_powergating_state(void *handle,
+					   enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
+	.name = "gmc_v12_0",
+	.early_init = gmc_v12_0_early_init,
+	.sw_init = gmc_v12_0_sw_init,
+	.hw_init = gmc_v12_0_hw_init,
+	.late_init = gmc_v12_0_late_init,
+	.sw_fini = gmc_v12_0_sw_fini,
+	.hw_fini = gmc_v12_0_hw_fini,
+	.suspend = gmc_v12_0_suspend,
+	.resume = gmc_v12_0_resume,
+	.is_idle = gmc_v12_0_is_idle,
+	.wait_for_idle = gmc_v12_0_wait_for_idle,
+	.soft_reset = gmc_v12_0_soft_reset,
+	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
+	.set_powergating_state = gmc_v12_0_set_powergating_state,
+	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
+};
+
+const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 12,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v12_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h
new file mode 100644
index 000000000000..deca93e4a156
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V12_0_H__
+#define __GMC_V12_0_H__
+
+extern const struct amd_ip_funcs gmc_v12_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v12_0_ip_block;
+
+#endif
-- 
2.44.0
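
As an aside, the PTE layout documented in gmc_v12_0.c above can be restated
as a small encoder. This is a hypothetical sketch (example_gfx12_pte is an
invented name, not part of the series); it only mirrors the documented bit
positions:

    /* Hypothetical: build a gfx12 PTE for a 4K page per the layout above. */
    static uint64_t example_gfx12_pte(uint64_t page_base, uint64_t mtype)
    {
        uint64_t pte = page_base & 0x0000FFFFFFFFF000ULL; /* 47:12 base addr */

        pte |= mtype;                 /* 55:54 M, from AMDGPU_PTE_MTYPE_GFX12() */
        pte |= AMDGPU_PTE_READABLE |  /* bit 5 */
               AMDGPU_PTE_WRITEABLE;  /* bit 6 */
        pte |= AMDGPU_PTE_IS_PTE;     /* bit 63, P: entry is a PTE */
        pte |= AMDGPU_PTE_VALID;      /* bit 0 */
        return pte;
    }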



* [PATCH 3/5] drm/amdgpu: Set pte_is_pte flag in gmc v12 gart
From: Alex Deucher @ 2024-04-25 19:58 UTC
  To: amd-gfx; +Cc: Hawking Zhang, Likun Gao, Alex Deucher

From: Hawking Zhang <Hawking.Zhang@amd.com>

pte_is_pte is a new flag introduced in gmc v12 that
needs to be set by default for PTEs.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c85ebc8360e1..c24f5bd3e09c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -686,7 +686,8 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
 
 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC) |
-				 AMDGPU_PTE_EXECUTABLE;
+				    AMDGPU_PTE_EXECUTABLE |
+				    AMDGPU_PTE_IS_PTE;
 
 	return amdgpu_gart_table_vram_alloc(adev);
 }
-- 
2.44.0
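
For context, a sketch (not from the patch) of the default flags stamped on
every gfx12 GART entry after this change; the readable/writeable and valid
bits are assumed to be OR'ed in later by the binding path:

    uint64_t gart_flags = AMDGPU_PTE_MTYPE_GFX12(MTYPE_UC) |
                          AMDGPU_PTE_EXECUTABLE |
                          AMDGPU_PTE_IS_PTE; /* bit 63: required on gfx12 PTEs */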



* [PATCH 4/5] drm/amdgpu: support gfx v12 specific pte/pde fields
From: Alex Deucher @ 2024-04-25 19:58 UTC
  To: amd-gfx; +Cc: Hawking Zhang, Likun Gao, Alex Deucher

From: Hawking Zhang <Hawking.Zhang@amd.com>

Add gfx v12 PTE/PDE support to the common GMC helpers.

v2: Squash in fixes (Alex)

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 12 ++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h    |  6 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c |  6 +++---
 5 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6bbab141eaae..8fe825479194 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -683,7 +683,7 @@ uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 		pte_flag |= AMDGPU_PTE_WRITEABLE;
 	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
+		pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
 	if (flags & AMDGPU_VM_PAGE_NOALLOC)
 		pte_flag |= AMDGPU_PTE_NOALLOC;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index be4629cdac04..9fcf194fea33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1015,7 +1015,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 	flags |= AMDGPU_PTE_WRITEABLE;
 	flags |= AMDGPU_PTE_SNOOPED;
 	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
-	flags |= AMDGPU_PDE_PTE;
+	flags |= AMDGPU_PDE_PTE_FLAG(adev);
 
 	/* The first n PDE0 entries are used as PTE,
 	 * pointing to vram
@@ -1028,7 +1028,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 	 * pointing to a 4K system page
 	 */
 	flags = AMDGPU_PTE_VALID;
-	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
+	flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0);
 	/* Requires gart_ptb_gpu_pa to be 4K aligned */
 	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
 	drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4e2391c83d7c..991e4d69c6a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1055,7 +1055,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				params.pages_addr = NULL;
 			}
 
-		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
 			addr = vram_base + cursor.start;
 		} else {
 			addr = 0;
@@ -1369,7 +1369,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 				   struct amdgpu_bo_va_mapping *mapping,
 				   struct dma_fence *fence)
 {
-	if (mapping->flags & AMDGPU_PTE_PRT)
+	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
 		amdgpu_vm_add_prt_cb(adev, fence);
 	kfree(mapping);
 }
@@ -1637,7 +1637,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	list_add(&mapping->list, &bo_va->invalids);
 	amdgpu_vm_it_insert(mapping, &vm->va);
 
-	if (mapping->flags & AMDGPU_PTE_PRT)
+	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
 		amdgpu_vm_prt_get(adev);
 
 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
@@ -1939,7 +1939,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		struct amdgpu_bo *bo = before->bo_va->base.bo;
 
 		amdgpu_vm_it_insert(before, &vm->va);
-		if (before->flags & AMDGPU_PTE_PRT)
+		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
@@ -1954,7 +1954,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		struct amdgpu_bo *bo = after->bo_va->base.bo;
 
 		amdgpu_vm_it_insert(after, &vm->va);
-		if (after->flags & AMDGPU_PTE_PRT)
+		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
@@ -2605,7 +2605,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	dma_fence_put(vm->last_tlb_flush);
 
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
-		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
 			prt_fini_needed = false;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index e0e7e944a323..aadfd5cc3c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -113,6 +113,8 @@ struct amdgpu_mem_stats;
 
 /* gfx12 */
 #define AMDGPU_PTE_PRT_GFX12		(1ULL << 56)
+#define AMDGPU_PTE_PRT_FLAG(adev)	\
+	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)
 
 #define AMDGPU_PTE_MTYPE_GFX12(a)	((uint64_t)(a) << 54)
 #define AMDGPU_PTE_MTYPE_GFX12_MASK	AMDGPU_PTE_MTYPE_GFX12(3ULL)
@@ -121,8 +123,12 @@ struct amdgpu_mem_stats;
 
 /* PDE Block Fragment Size for gfx v12 */
 #define AMDGPU_PDE_BFS_GFX12(a)		((uint64_t)((a) & 0x1fULL) << 58)
+#define AMDGPU_PDE_BFS_FLAG(adev, a)	\
+	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))
 /* PDE is handled as PTE for gfx v12 */
 #define AMDGPU_PDE_PTE_GFX12		(1ULL << 63)
+#define AMDGPU_PDE_PTE_FLAG(adev)	\
+	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)
 
 /* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER	0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 7fdd306a48a0..0763382d305a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -413,7 +413,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (adev->asic_type >= CHIP_VEGA10) {
 		if (level != AMDGPU_VM_PTB) {
 			/* Handle leaf PDEs as PTEs */
-			flags |= AMDGPU_PDE_PTE;
+			flags |= AMDGPU_PDE_PTE_FLAG(adev);
 			amdgpu_gmc_get_vm_pde(adev, level,
 					      &value, &flags);
 		} else {
@@ -757,12 +757,12 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
 	struct amdgpu_device *adev = params->adev;
 
 	if (level != AMDGPU_VM_PTB) {
-		flags |= AMDGPU_PDE_PTE;
+		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
 		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
 
 	} else if (adev->asic_type >= CHIP_VEGA10 &&
 		   !(flags & AMDGPU_PTE_VALID) &&
-		   !(flags & AMDGPU_PTE_PRT)) {
+		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
 
 		/* Workaround for fault priority problem on GMC9 */
 		flags |= AMDGPU_PTE_EXECUTABLE;
-- 
2.44.0
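
The new macros implement a runtime dispatch on the GC IP version, so call
sites stay version-agnostic while the bit position moves. A sketch of what
AMDGPU_PTE_PRT_FLAG(adev) resolves to (the legacy bit value is taken from
amdgpu_vm.h):

    uint64_t prt_bit =
        (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ?
            AMDGPU_PTE_PRT_GFX12 : /* bit 56 on gfx12 */
            AMDGPU_PTE_PRT;        /* bit 51 on earlier ASICs */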



* [PATCH 5/5] drm/amdgpu/discovery: Add gmc v12_0 ip block
From: Alex Deucher @ 2024-04-25 19:58 UTC
  To: amd-gfx; +Cc: Likun Gao, Hawking Zhang, Alex Deucher

From: Likun Gao <Likun.Gao@amd.com>

Add gmc v12_0 ip block.

v2: Squash in updates (Alex)

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 79b43e4bf7c8..98d6915e955e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -55,6 +55,7 @@
 #include "smuio_v9_0.h"
 #include "gmc_v10_0.h"
 #include "gmc_v11_0.h"
+#include "gmc_v12_0.h"
 #include "gfxhub_v2_0.h"
 #include "mmhub_v2_0.h"
 #include "nbio_v2_3.h"
@@ -1753,6 +1754,10 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(11, 5, 1):
 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
 		break;
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
+		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
+		break;
 	default:
 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
 			amdgpu_ip_version(adev, GC_HWIP, 0));
-- 
2.44.0


