AMD-GFX Archive on lore.kernel.org
 help / color / Atom feed
* [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment
@ 2020-06-25  3:18 Felix Kuehling
  2020-06-25  3:18 ` [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus Felix Kuehling
  2020-06-25  8:18 ` [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Christian König
  0 siblings, 2 replies; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25  3:18 UTC (permalink / raw)
  To: amd-gfx

The KFD VMID assignment was hard-coded in a few places. Consolidate that in
a single variable adev->vm_manager.first_kfd_vmid. The value is still
assigned in gmc-ip-version-specific code.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12 +++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c    |  3 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  1 +
 drivers/gpu/drm/amd/amdgpu/cikd.h          |  2 --
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c     |  6 ++----
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c      |  6 ++----
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c      |  6 ++----
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |  6 ++----
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c     |  5 +----
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  7 +------
 drivers/gpu/drm/amd/amdgpu/si_enums.h      |  1 -
 drivers/gpu/drm/amd/amdgpu/sid.h           |  2 --
 drivers/gpu/drm/amd/amdgpu/vid.h           |  2 --
 16 files changed, 22 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index ad59ac4423b8..1b865fed74ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -31,8 +31,6 @@
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
 
-static const unsigned int compute_vmid_bitmap = 0xFF00;
-
 /* Total memory size in system memory and all GPU VRAM. Used to
  * estimate worst case amount of memory to reserve for page tables
  */
@@ -113,7 +111,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
 	if (adev->kfd.dev) {
 		struct kgd2kfd_shared_resources gpu_resources = {
-			.compute_vmid_bitmap = compute_vmid_bitmap,
+			.compute_vmid_bitmap =
+				((1 << AMDGPU_NUM_VMID) - 1) -
+				((1 << adev->vm_manager.first_kfd_vmid) - 1),
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
 			.gpuvm_size = min(adev->vm_manager.max_pfn
@@ -637,10 +637,8 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
-	if (adev->kfd.dev) {
-		if ((1 << vmid) & compute_vmid_bitmap)
-			return true;
-	}
+	if (adev->kfd.dev)
+		return vmid >= adev->vm_manager.first_kfd_vmid;
 
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 267fa45ddb66..7521f4ab55de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -574,6 +574,9 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
 		atomic_set(&id_mgr->reserved_vmid_num, 0);
 
+		/* manage only VMIDs not used by KFD */
+		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+
 		/* skip over VMID 0, since it is the system VM */
 		for (j = 1; j < id_mgr->num_ids; ++j) {
 			amdgpu_vmid_reset(adev, i, j);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index c8e68d7890bf..770025a5e500 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -324,6 +324,7 @@ struct amdgpu_vm {
 struct amdgpu_vm_manager {
 	/* Handling of VMIDs */
 	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
+	unsigned int				first_kfd_vmid;
 
 	/* Handling of VM fences */
 	u64					fence_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 5f3f6ebfb387..55982c0064b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -54,8 +54,6 @@
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
 
-#define AMDGPU_NUM_OF_VMIDS	8
-
 #define		PIPEID(x)					((x) << 0)
 #define		MEID(x)						((x) << 2)
 #define		VMID(x)						((x) << 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 323285eb1457..8366c506a8b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4512,8 +4512,6 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
 }
 
 #define DEFAULT_SH_MEM_BASES	(0x6000)
-#define FIRST_COMPUTE_VMID	(8)
-#define LAST_COMPUTE_VMID	(16)
 
 static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
 {
@@ -4529,7 +4527,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
 
 	mutex_lock(&adev->srbm_mutex);
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		nv_grbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
@@ -4540,7 +4538,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
 	   acccess. These should be enabled by FW for target VMIDs. */
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4aec76049a60..04eaf3a8fddb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1850,8 +1850,6 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
  *
  */
 #define DEFAULT_SH_MEM_BASES	(0x6000)
-#define FIRST_COMPUTE_VMID	(8)
-#define LAST_COMPUTE_VMID	(16)
 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
@@ -1869,7 +1867,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
 	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
 	mutex_lock(&adev->srbm_mutex);
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		cik_srbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
@@ -1882,7 +1880,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
 	   acccess. These should be enabled by FW for target VMIDs. */
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
 		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
 		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index efb759b62d21..33f1c4a46ebe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3686,8 +3686,6 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
  *
  */
 #define DEFAULT_SH_MEM_BASES	(0x6000)
-#define FIRST_COMPUTE_VMID	(8)
-#define LAST_COMPUTE_VMID	(16)
 static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
@@ -3710,7 +3708,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
 
 	mutex_lock(&adev->srbm_mutex);
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		vi_srbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
@@ -3723,7 +3721,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
 	   acccess. These should be enabled by FW for target VMIDs. */
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
 		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
 		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 99ffc3e1fddc..cb9d60a4e05e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2463,8 +2463,6 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
 }
 
 #define DEFAULT_SH_MEM_BASES	(0x6000)
-#define FIRST_COMPUTE_VMID	(8)
-#define LAST_COMPUTE_VMID	(16)
 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
@@ -2484,7 +2482,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
 
 	mutex_lock(&adev->srbm_mutex);
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		soc15_grbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
@@ -2495,7 +2493,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
 	   acccess. These should be enabled by FW for target VMIDs. */
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f7e66bf0f647..bfe62985afff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -49,8 +49,6 @@
 #include "mmhub_v2_0.h"
 #include "athub_v2_0.h"
 #include "athub_v2_1.h"
-/* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/
-#define AMDGPU_NUM_OF_VMIDS			8
 
 #if 0
 static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
@@ -905,8 +903,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
-	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.first_kfd_vmid = 8;
 
 	amdgpu_vm_manager_init(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a75e472b4a81..538e7ee35cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -878,7 +878,7 @@ static int gmc_v6_0_sw_init(void *handle)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.first_kfd_vmid = 8;
 	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index bcd4baecfe11..e18296dc1386 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1052,7 +1052,7 @@ static int gmc_v7_0_sw_init(void *handle)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.first_kfd_vmid = 8;
 	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 26976e50e2a2..a9e722b8a458 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1177,7 +1177,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.first_kfd_vmid = 8;
 	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 11e93a82131d..6e10b42c57e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -68,9 +68,6 @@
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
 
-/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
-#define AMDGPU_NUM_OF_VMIDS			8
-
 static const u32 golden_settings_vega10_hdp[] =
 {
 	0xf64, 0x0fffffff, 0x00000000,
@@ -1251,9 +1248,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
-	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
-	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.first_kfd_vmid = 8;
 
 	amdgpu_vm_manager_init(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index 790ba46eaebb..4e935baa7b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -121,7 +121,6 @@
 #define CURSOR_UPDATE_LOCK             (1 << 16)
 #define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
 
-#define AMDGPU_NUM_OF_VMIDS                     8
 #define SI_CRTC0_REGISTER_OFFSET                0
 #define SI_CRTC1_REGISTER_OFFSET                0x300
 #define SI_CRTC2_REGISTER_OFFSET                0x2600
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index 5f660f0c819f..ca2e9d661b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -48,8 +48,6 @@
 #define SI_MAX_TCC               	16
 #define SI_MAX_TCC_MASK          	0xFFFF
 
-#define AMDGPU_NUM_OF_VMIDS 		8
-
 /* SMC IND accessor regs */
 #define SMC_IND_INDEX_0                              0x80
 #define SMC_IND_DATA_0                               0x81
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 7a01e6133798..80ce42aacc0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -67,8 +67,6 @@
 #define HPD4_REGISTER_OFFSET                 (0x18b8 - 0x1898)
 #define HPD5_REGISTER_OFFSET                 (0x18c0 - 0x1898)
 
-#define AMDGPU_NUM_OF_VMIDS			8
-
 #define		PIPEID(x)					((x) << 0)
 #define		MEID(x)						((x) << 2)
 #define		VMID(x)						((x) << 4)
-- 
2.26.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25  3:18 [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Felix Kuehling
@ 2020-06-25  3:18 ` Felix Kuehling
  2020-06-25  8:19   ` Christian König
  2020-06-25  8:18 ` [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Christian König
  1 sibling, 1 reply; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25  3:18 UTC (permalink / raw)
  To: amd-gfx

When there is no graphics support, KFD can use more of the VMIDs. Graphics
VMIDs are only used for video decoding/encoding and post processing. With
two VCE engines, there is no reason to reserve more than 2 VMIDs for that.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6e10b42c57e5..3470929e5b8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
 	/*
 	 * number of VMs
 	 * VMID 0 is reserved for System
-	 * amdgpu graphics/compute will use VMIDs 1-7
-	 * amdkfd will use VMIDs 8-15
+	 * amdgpu graphics/compute will use VMIDs 1..n-1
+	 * amdkfd will use VMIDs n..15
+	 *
+	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
+	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
+	 * for video processing.
 	 */
-	adev->vm_manager.first_kfd_vmid = 8;
+	adev->vm_manager.first_kfd_vmid =
+		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
 
 	amdgpu_vm_manager_init(adev);
 
-- 
2.26.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment
  2020-06-25  3:18 [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Felix Kuehling
  2020-06-25  3:18 ` [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus Felix Kuehling
@ 2020-06-25  8:18 ` Christian König
  1 sibling, 0 replies; 12+ messages in thread
From: Christian König @ 2020-06-25  8:18 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 05:18 schrieb Felix Kuehling:
> The KFD VMID assignment was hard-coded in a few places. Consolidate that in
> a single variable adev->vm_manager.first_kfd_vmid. The value is still
> assigned in gmc-ip-version-specific code.
>
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for this one.

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12 +++++-------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c    |  3 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  1 +
>   drivers/gpu/drm/amd/amdgpu/cikd.h          |  2 --
>   drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c     |  6 ++----
>   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c      |  6 ++----
>   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c      |  6 ++----
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |  6 ++----
>   drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c     |  5 +----
>   drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  2 +-
>   drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  2 +-
>   drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  2 +-
>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  7 +------
>   drivers/gpu/drm/amd/amdgpu/si_enums.h      |  1 -
>   drivers/gpu/drm/amd/amdgpu/sid.h           |  2 --
>   drivers/gpu/drm/amd/amdgpu/vid.h           |  2 --
>   16 files changed, 22 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> index ad59ac4423b8..1b865fed74ca 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> @@ -31,8 +31,6 @@
>   #include "amdgpu_xgmi.h"
>   #include <uapi/linux/kfd_ioctl.h>
>   
> -static const unsigned int compute_vmid_bitmap = 0xFF00;
> -
>   /* Total memory size in system memory and all GPU VRAM. Used to
>    * estimate worst case amount of memory to reserve for page tables
>    */
> @@ -113,7 +111,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
>   
>   	if (adev->kfd.dev) {
>   		struct kgd2kfd_shared_resources gpu_resources = {
> -			.compute_vmid_bitmap = compute_vmid_bitmap,
> +			.compute_vmid_bitmap =
> +				((1 << AMDGPU_NUM_VMID) - 1) -
> +				((1 << adev->vm_manager.first_kfd_vmid) - 1),
>   			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
>   			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
>   			.gpuvm_size = min(adev->vm_manager.max_pfn
> @@ -637,10 +637,8 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
>   
>   bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
>   {
> -	if (adev->kfd.dev) {
> -		if ((1 << vmid) & compute_vmid_bitmap)
> -			return true;
> -	}
> +	if (adev->kfd.dev)
> +		return vmid >= adev->vm_manager.first_kfd_vmid;
>   
>   	return false;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 267fa45ddb66..7521f4ab55de 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -574,6 +574,9 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
>   		INIT_LIST_HEAD(&id_mgr->ids_lru);
>   		atomic_set(&id_mgr->reserved_vmid_num, 0);
>   
> +		/* manage only VMIDs not used by KFD */
> +		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
> +
>   		/* skip over VMID 0, since it is the system VM */
>   		for (j = 1; j < id_mgr->num_ids; ++j) {
>   			amdgpu_vmid_reset(adev, i, j);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index c8e68d7890bf..770025a5e500 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -324,6 +324,7 @@ struct amdgpu_vm {
>   struct amdgpu_vm_manager {
>   	/* Handling of VMIDs */
>   	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
> +	unsigned int				first_kfd_vmid;
>   
>   	/* Handling of VM fences */
>   	u64					fence_context;
> diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
> index 5f3f6ebfb387..55982c0064b5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cikd.h
> +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
> @@ -54,8 +54,6 @@
>   #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
>   #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
>   
> -#define AMDGPU_NUM_OF_VMIDS	8
> -
>   #define		PIPEID(x)					((x) << 0)
>   #define		MEID(x)						((x) << 2)
>   #define		VMID(x)						((x) << 4)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> index 323285eb1457..8366c506a8b5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
> @@ -4512,8 +4512,6 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
>   }
>   
>   #define DEFAULT_SH_MEM_BASES	(0x6000)
> -#define FIRST_COMPUTE_VMID	(8)
> -#define LAST_COMPUTE_VMID	(16)
>   
>   static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
>   {
> @@ -4529,7 +4527,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
>   	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
>   
>   	mutex_lock(&adev->srbm_mutex);
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		nv_grbm_select(adev, 0, 0, 0, i);
>   		/* CP and shaders */
>   		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
> @@ -4540,7 +4538,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
>   
>   	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
>   	   acccess. These should be enabled by FW for target VMIDs. */
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> index 4aec76049a60..04eaf3a8fddb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> @@ -1850,8 +1850,6 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
>    *
>    */
>   #define DEFAULT_SH_MEM_BASES	(0x6000)
> -#define FIRST_COMPUTE_VMID	(8)
> -#define LAST_COMPUTE_VMID	(16)
>   static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
>   {
>   	int i;
> @@ -1869,7 +1867,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
>   			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
>   	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
>   	mutex_lock(&adev->srbm_mutex);
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		cik_srbm_select(adev, 0, 0, 0, i);
>   		/* CP and shaders */
>   		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
> @@ -1882,7 +1880,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
>   
>   	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
>   	   acccess. These should be enabled by FW for target VMIDs. */
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
>   		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
>   		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> index efb759b62d21..33f1c4a46ebe 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> @@ -3686,8 +3686,6 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
>    *
>    */
>   #define DEFAULT_SH_MEM_BASES	(0x6000)
> -#define FIRST_COMPUTE_VMID	(8)
> -#define LAST_COMPUTE_VMID	(16)
>   static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
>   {
>   	int i;
> @@ -3710,7 +3708,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
>   			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
>   
>   	mutex_lock(&adev->srbm_mutex);
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		vi_srbm_select(adev, 0, 0, 0, i);
>   		/* CP and shaders */
>   		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
> @@ -3723,7 +3721,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
>   
>   	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
>   	   acccess. These should be enabled by FW for target VMIDs. */
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
>   		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
>   		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 99ffc3e1fddc..cb9d60a4e05e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -2463,8 +2463,6 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
>   }
>   
>   #define DEFAULT_SH_MEM_BASES	(0x6000)
> -#define FIRST_COMPUTE_VMID	(8)
> -#define LAST_COMPUTE_VMID	(16)
>   static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
>   {
>   	int i;
> @@ -2484,7 +2482,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
>   			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
>   
>   	mutex_lock(&adev->srbm_mutex);
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		soc15_grbm_select(adev, 0, 0, 0, i);
>   		/* CP and shaders */
>   		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
> @@ -2495,7 +2493,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
>   
>   	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
>   	   acccess. These should be enabled by FW for target VMIDs. */
> -	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
> +	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
>   		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> index f7e66bf0f647..bfe62985afff 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> @@ -49,8 +49,6 @@
>   #include "mmhub_v2_0.h"
>   #include "athub_v2_0.h"
>   #include "athub_v2_1.h"
> -/* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/
> -#define AMDGPU_NUM_OF_VMIDS			8
>   
>   #if 0
>   static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
> @@ -905,8 +903,7 @@ static int gmc_v10_0_sw_init(void *handle)
>   	 * amdgpu graphics/compute will use VMIDs 1-7
>   	 * amdkfd will use VMIDs 8-15
>   	 */
> -	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
> -	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
> +	adev->vm_manager.first_kfd_vmid = 8;
>   
>   	amdgpu_vm_manager_init(adev);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index a75e472b4a81..538e7ee35cdf 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -878,7 +878,7 @@ static int gmc_v6_0_sw_init(void *handle)
>   	 * amdgpu graphics/compute will use VMIDs 1-7
>   	 * amdkfd will use VMIDs 8-15
>   	 */
> -	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
> +	adev->vm_manager.first_kfd_vmid = 8;
>   	amdgpu_vm_manager_init(adev);
>   
>   	/* base offset of vram pages */
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> index bcd4baecfe11..e18296dc1386 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> @@ -1052,7 +1052,7 @@ static int gmc_v7_0_sw_init(void *handle)
>   	 * amdgpu graphics/compute will use VMIDs 1-7
>   	 * amdkfd will use VMIDs 8-15
>   	 */
> -	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
> +	adev->vm_manager.first_kfd_vmid = 8;
>   	amdgpu_vm_manager_init(adev);
>   
>   	/* base offset of vram pages */
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> index 26976e50e2a2..a9e722b8a458 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> @@ -1177,7 +1177,7 @@ static int gmc_v8_0_sw_init(void *handle)
>   	 * amdgpu graphics/compute will use VMIDs 1-7
>   	 * amdkfd will use VMIDs 8-15
>   	 */
> -	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
> +	adev->vm_manager.first_kfd_vmid = 8;
>   	amdgpu_vm_manager_init(adev);
>   
>   	/* base offset of vram pages */
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 11e93a82131d..6e10b42c57e5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -68,9 +68,6 @@
>   #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
>   #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
>   
> -/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
> -#define AMDGPU_NUM_OF_VMIDS			8
> -
>   static const u32 golden_settings_vega10_hdp[] =
>   {
>   	0xf64, 0x0fffffff, 0x00000000,
> @@ -1251,9 +1248,7 @@ static int gmc_v9_0_sw_init(void *handle)
>   	 * amdgpu graphics/compute will use VMIDs 1-7
>   	 * amdkfd will use VMIDs 8-15
>   	 */
> -	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
> -	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
> -	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;
> +	adev->vm_manager.first_kfd_vmid = 8;
>   
>   	amdgpu_vm_manager_init(adev);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
> index 790ba46eaebb..4e935baa7b91 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
> +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
> @@ -121,7 +121,6 @@
>   #define CURSOR_UPDATE_LOCK             (1 << 16)
>   #define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
>   
> -#define AMDGPU_NUM_OF_VMIDS                     8
>   #define SI_CRTC0_REGISTER_OFFSET                0
>   #define SI_CRTC1_REGISTER_OFFSET                0x300
>   #define SI_CRTC2_REGISTER_OFFSET                0x2600
> diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
> index 5f660f0c819f..ca2e9d661b28 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sid.h
> +++ b/drivers/gpu/drm/amd/amdgpu/sid.h
> @@ -48,8 +48,6 @@
>   #define SI_MAX_TCC               	16
>   #define SI_MAX_TCC_MASK          	0xFFFF
>   
> -#define AMDGPU_NUM_OF_VMIDS 		8
> -
>   /* SMC IND accessor regs */
>   #define SMC_IND_INDEX_0                              0x80
>   #define SMC_IND_DATA_0                               0x81
> diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
> index 7a01e6133798..80ce42aacc0c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vid.h
> +++ b/drivers/gpu/drm/amd/amdgpu/vid.h
> @@ -67,8 +67,6 @@
>   #define HPD4_REGISTER_OFFSET                 (0x18b8 - 0x1898)
>   #define HPD5_REGISTER_OFFSET                 (0x18c0 - 0x1898)
>   
> -#define AMDGPU_NUM_OF_VMIDS			8
> -
>   #define		PIPEID(x)					((x) << 0)
>   #define		MEID(x)						((x) << 2)
>   #define		VMID(x)						((x) << 4)

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25  3:18 ` [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus Felix Kuehling
@ 2020-06-25  8:19   ` Christian König
  2020-06-25 15:15     ` Felix Kuehling
  0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2020-06-25  8:19 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 05:18 schrieb Felix Kuehling:
> When there is no graphics support, KFD can use more of the VMIDs. Graphics
> VMIDs are only used for video decoding/encoding and post processing. With
> two VCE engines, there is no reason to reserve more than 2 VMIDs for that.

IIRC the expectation is that we still use the compute queues for post 
processing and not the KFD.

So we will need at least some VMIDs for that as well.

>
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>   1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 6e10b42c57e5..3470929e5b8e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>   	/*
>   	 * number of VMs
>   	 * VMID 0 is reserved for System
> -	 * amdgpu graphics/compute will use VMIDs 1-7
> -	 * amdkfd will use VMIDs 8-15
> +	 * amdgpu graphics/compute will use VMIDs 1..n-1
> +	 * amdkfd will use VMIDs n..15
> +	 *
> +	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
> +	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
> +	 * for video processing.
>   	 */
> -	adev->vm_manager.first_kfd_vmid = 8;
> +	adev->vm_manager.first_kfd_vmid =
> +		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>   
>   	amdgpu_vm_manager_init(adev);
>   

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25  8:19   ` Christian König
@ 2020-06-25 15:15     ` Felix Kuehling
  2020-06-25 15:38       ` Christian König
  0 siblings, 1 reply; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25 15:15 UTC (permalink / raw)
  To: christian.koenig, amd-gfx

Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>> When there is no graphics support, KFD can use more of the VMIDs.
>> Graphics
>> VMIDs are only used for video decoding/encoding and post processing.
>> With
>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>> that.
>
> IIRC the expectation is that we still use the compute queues for post
> processing and not the KFD.
>
> So we will need at least VMIDs for that as well.

Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
context. So can't they share the same VMIDs?

Regards,
  Felix


>
>>
>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>   1 file changed, 8 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> index 6e10b42c57e5..3470929e5b8e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>       /*
>>        * number of VMs
>>        * VMID 0 is reserved for System
>> -     * amdgpu graphics/compute will use VMIDs 1-7
>> -     * amdkfd will use VMIDs 8-15
>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>> +     * amdkfd will use VMIDs n..15
>> +     *
>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>> +     * for video processing.
>>        */
>> -    adev->vm_manager.first_kfd_vmid = 8;
>> +    adev->vm_manager.first_kfd_vmid =
>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>         amdgpu_vm_manager_init(adev);
>>   
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 15:15     ` Felix Kuehling
@ 2020-06-25 15:38       ` Christian König
  2020-06-25 15:43         ` Felix Kuehling
  0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2020-06-25 15:38 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 17:15 schrieb Felix Kuehling:
> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>> When there is no graphics support, KFD can use more of the VMIDs.
>>> Graphics
>>> VMIDs are only used for video decoding/encoding and post processing.
>>> With
>>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>>> that.
>> IIRC the expectation is that we still use the compute queues for post
>> processing and not the KFD.
>>
>> So we will need at least VMIDs for that as well.
> Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
> context. So can't they share the same VMIDs?

Ah! Good point. But we still need at least 3 VMIDs when VMID reservation 
is active.

I don't think you can go below this.

Regards,
Christian.

>
> Regards,
>    Felix
>
>
>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>    1 file changed, 8 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>> index 6e10b42c57e5..3470929e5b8e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>        /*
>>>         * number of VMs
>>>         * VMID 0 is reserved for System
>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>> -     * amdkfd will use VMIDs 8-15
>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>> +     * amdkfd will use VMIDs n..15
>>> +     *
>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>>> +     * for video processing.
>>>         */
>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>> +    adev->vm_manager.first_kfd_vmid =
>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>          amdgpu_vm_manager_init(adev);
>>>    

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 15:38       ` Christian König
@ 2020-06-25 15:43         ` Felix Kuehling
  2020-06-25 15:50           ` Christian König
  0 siblings, 1 reply; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25 15:43 UTC (permalink / raw)
  To: Christian König, amd-gfx

Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>> Graphics
>>>> VMIDs are only used for video decoding/encoding and post processing.
>>>> With
>>>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>>>> that.
>>> IIRC the expectation is that we still use the compute queues for post
>>> processing and not the KFD.
>>>
>>> So we will need at least VMIDs for that as well.
>> Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>> context. So can't they share the same VMIDs?
>
> Ah! Good point, But we still need at least 3 VMID when VMID
> reservation is active.

I don't know anything about that VMID reservation feature. What is it
used for? Who is using it? How many VMIDs can be reserved?

If one VMID is reserved, there would still be one VMID left for video
post processing. That's not ideal, but I don't think it would be fatal.
But is it a realistic use case that VMID reservation and ROCm+video
processing would happen on the same system at the same time?

Thanks,
  Felix


>
> I don't think you can go below this.
>
> Regards,
> Christian.
>
>>
>> Regards,
>>    Felix
>>
>>
>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>> ---
>>>>    drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>    1 file changed, 8 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>        /*
>>>>         * number of VMs
>>>>         * VMID 0 is reserved for System
>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>> -     * amdkfd will use VMIDs 8-15
>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>> +     * amdkfd will use VMIDs n..15
>>>> +     *
>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>>>> +     * for video processing.
>>>>         */
>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>> +    adev->vm_manager.first_kfd_vmid =
>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>          amdgpu_vm_manager_init(adev);
>>>>    
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 15:43         ` Felix Kuehling
@ 2020-06-25 15:50           ` Christian König
  2020-06-25 15:58             ` Felix Kuehling
  0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2020-06-25 15:50 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 17:43 schrieb Felix Kuehling:
> Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
>> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>>> Graphics
>>>>> VMIDs are only used for video decoding/encoding and post processing.
>>>>> With
>>>>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>>>>> that.
>>>> IIRC the expectation is that we still use the compute queues for post
>>>> processing and not the KFD.
>>>>
>>>> So we will need at least VMIDs for that as well.
>>> Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
>>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>>> context. So can't they share the same VMIDs?
>> Ah! Good point, But we still need at least 3 VMID when VMID
>> reservation is active.
> I don't know anything about that VMID reservation feature. What is it
> used for? Who is using it? How many VMIDs can be reserved?
>
> If one VMID is reserved, there would still be one VMID left for video
> post processing. That's not ideal, but I don't think it would be fatal.
> But is it a realistic use case that VMID reservation and ROCm+video
> processing would happen on the same system at the same time?

VMID reservation is used for debugging and yes there can only be one 
reserved.

But I think we need at least two for dynamic assignment or we might run 
into a BUG_ON() while giving VMIDs to jobs.

But I certainly need to test this as well. It's possible that 1 VMID 
indeed works as expected.

Regards,
Christian.

>
> Thanks,
>    Felix
>
>
>> I don't think you can go below this.
>>
>> Regards,
>> Christian.
>>
>>> Regards,
>>>     Felix
>>>
>>>
>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>>> ---
>>>>>     drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>>     1 file changed, 8 insertions(+), 3 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>>         /*
>>>>>          * number of VMs
>>>>>          * VMID 0 is reserved for System
>>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>>> -     * amdkfd will use VMIDs 8-15
>>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>>> +     * amdkfd will use VMIDs n..15
>>>>> +     *
>>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>>>>> +     * for video processing.
>>>>>          */
>>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>>> +    adev->vm_manager.first_kfd_vmid =
>>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>>           amdgpu_vm_manager_init(adev);
>>>>>     

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 15:50           ` Christian König
@ 2020-06-25 15:58             ` Felix Kuehling
  2020-06-25 16:01               ` Christian König
  0 siblings, 1 reply; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25 15:58 UTC (permalink / raw)
  To: Christian König, amd-gfx


Am 2020-06-25 um 11:50 a.m. schrieb Christian König:
> Am 25.06.20 um 17:43 schrieb Felix Kuehling:
>> Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
>>> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>>>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>>>> Graphics
>>>>>> VMIDs are only used for video decoding/encoding and post processing.
>>>>>> With
>>>>>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>>>>>> that.
>>>>> IIRC the expectation is that we still use the compute queues for post
>>>>> processing and not the KFD.
>>>>>
>>>>> So we will need at least VMIDs for that as well.
>>>> Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
>>>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>>>> context. So can't they share the same VMIDs?
>>> Ah! Good point, But we still need at least 3 VMID when VMID
>>> reservation is active.
>> I don't know anything about that VMID reservation feature. What is it
>> used for? Who is using it? How many VMIDs can be reserved?
>>
>> If one VMID is reserved, there would still be one VMID left for video
>> post processing. That's not ideal, but I don't think it would be fatal.
>> But is it a realistic use case that VMID reservation and ROCm+video
>> processing would happen on the same system at the same time?
>
> VMID reservation is used for debugging and yes there can only be one
> reserved.
>
> But I think we need at least two for dynamic assignment or we might
> run into a BUG_ON() while giving VMIDs to jobs.

I don't see any BUGs or BUG_ONs in amdgpu_ids.c. What should I be
looking out for?


> But I certainly need to test this as well. It's possible that 1 VMID
> indeed works as expected.

I could run the test, if you describe the problematic scenario you have
in mind.

Thanks,
  Felix


>
> Regards,
> Christian.
>
>>
>> Thanks,
>>    Felix
>>
>>
>>> I don't think you can go below this.
>>>
>>> Regards,
>>> Christian.
>>>
>>>> Regards,
>>>>     Felix
>>>>
>>>>
>>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>>>> ---
>>>>>>     drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>>>     1 file changed, 8 insertions(+), 3 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>>>         /*
>>>>>>          * number of VMs
>>>>>>          * VMID 0 is reserved for System
>>>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>>>> -     * amdkfd will use VMIDs 8-15
>>>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>>>> +     * amdkfd will use VMIDs n..15
>>>>>> +     *
>>>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>>>>>> +     * for video processing.
>>>>>>          */
>>>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>>>> +    adev->vm_manager.first_kfd_vmid =
>>>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>>>           amdgpu_vm_manager_init(adev);
>>>>>>     
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 15:58             ` Felix Kuehling
@ 2020-06-25 16:01               ` Christian König
  2020-06-25 20:39                 ` Felix Kuehling
  0 siblings, 1 reply; 12+ messages in thread
From: Christian König @ 2020-06-25 16:01 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 17:58 schrieb Felix Kuehling:
> Am 2020-06-25 um 11:50 a.m. schrieb Christian König:
>> Am 25.06.20 um 17:43 schrieb Felix Kuehling:
>>> Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
>>>> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>>>>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>>>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>>>>> Graphics
>>>>>>> VMIDs are only used for video decoding/encoding and post processing.
>>>>>>> With
>>>>>>> two VCE engines, there is no reason to reserve more than 2 VMIDs for
>>>>>>> that.
>>>>>> IIRC the expectation is that we still use the compute queues for post
>>>>>> processing and not the KFD.
>>>>>>
>>>>>> So we will need at least VMIDs for that as well.
>>>>> Correct. Post processing uses compute queues and VMIDs in the GFXHUB.
>>>>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>>>>> context. So can't they share the same VMIDs?
>>>> Ah! Good point, But we still need at least 3 VMID when VMID
>>>> reservation is active.
>>> I don't know anything about that VMID reservation feature. What is it
>>> used for? Who is using it? How many VMIDs can be reserved?
>>>
>>> If one VMID is reserved, there would still be one VMID left for video
>>> post processing. That's not ideal, but I don't think it would be fatal.
>>> But is it a realistic use case that VMID reservation and ROCm+video
>>> processing would happen on the same system at the same time?
>> VMID reservation is used for debugging and yes there can only be one
>> reserved.
>>
>> But I think we need at least two for dynamic assignment or we might
>> run into a BUG_ON() while giving VMIDs to jobs.
> I don't see any BUGs or BUG_ONs in amdgpu_ids.c. What should I be
> looking out for?

We used to have a BUG_ON() when we couldn't find a VMID as an alternative 
if the process already has one but needs to flush it.

But it's a long time ago that I last looked into this.

>> But I certainly need to test this as well. It's possible that 1 VMID
>> indeed works as expected.
> I could run the test, if you describe the problematic scenario you have
> in mind.

Just try to set the available VMIDs to 1 and see if GFX/Compute and MM 
submission work at the same time from multiple processes.

A few UVD video playbacks at the same time should do the job.

Regards,
Christian.

>
> Thanks,
>    Felix
>
>
>> Regards,
>> Christian.
>>
>>> Thanks,
>>>     Felix
>>>
>>>
>>>> I don't think you can go below this.
>>>>
>>>> Regards,
>>>> Christian.
>>>>
>>>>> Regards,
>>>>>      Felix
>>>>>
>>>>>
>>>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>>>>> ---
>>>>>>>      drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>>>>      1 file changed, 8 insertions(+), 3 deletions(-)
>>>>>>>
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>>>>          /*
>>>>>>>           * number of VMs
>>>>>>>           * VMID 0 is reserved for System
>>>>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>>>>> -     * amdkfd will use VMIDs 8-15
>>>>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>>>>> +     * amdkfd will use VMIDs n..15
>>>>>>> +     *
>>>>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
>>>>>>> +     * for video processing.
>>>>>>>           */
>>>>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>>>>> +    adev->vm_manager.first_kfd_vmid =
>>>>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>>>>            amdgpu_vm_manager_init(adev);
>>>>>>>      

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 16:01               ` Christian König
@ 2020-06-25 20:39                 ` Felix Kuehling
  2020-06-26  6:57                   ` Christian König
  0 siblings, 1 reply; 12+ messages in thread
From: Felix Kuehling @ 2020-06-25 20:39 UTC (permalink / raw)
  To: Christian König, amd-gfx


On 2020-06-25 12:01 p.m., Christian König wrote:
> Am 25.06.20 um 17:58 schrieb Felix Kuehling:
>> Am 2020-06-25 um 11:50 a.m. schrieb Christian König:
>>> Am 25.06.20 um 17:43 schrieb Felix Kuehling:
>>>> Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
>>>>> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>>>>>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>>>>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>>>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>>>>>> Graphics
>>>>>>>> VMIDs are only used for video decoding/encoding and post 
>>>>>>>> processing.
>>>>>>>> With
>>>>>>>> two VCE engines, there is no reason to reserve more than 2 
>>>>>>>> VMIDs for
>>>>>>>> that.
>>>>>>> IIRC the expectation is that we still use the compute queues for 
>>>>>>> post
>>>>>>> processing and not the KFD.
>>>>>>>
>>>>>>> So we will need at least VMIDs for that as well.
>>>>>> Correct. Post processing uses compute queues and VMIDs in the 
>>>>>> GFXHUB.
>>>>>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>>>>>> context. So can't they share the same VMIDs?
>>>>> Ah! Good point, But we still need at least 3 VMID when VMID
>>>>> reservation is active.
>>>> I don't know anything about that VMID reservation feature. What is it
>>>> used for? Who is using it? How many VMIDs can be reserved?
>>>>
>>>> If one VMID is reserved, there would still be one VMID left for video
>>>> post processing. That's not ideal, but I don't think it would be 
>>>> fatal.
>>>> But is it a realistic use case that VMID reservation and ROCm+video
>>>> processing would happen on the same system at the same time?
>>> VMID reservation is used for debugging and yes there can only be one
>>> reserved.
>>>
>>> But I think we need at least two for dynamic assignment or we might
>>> run into a BUG_ON() while giving VMIDs to jobs.
>> I don't see any BUGs or BUG_ONs in amdgpu_ids.c. What should I be
>> looking out for?
>
> We used to have a BUG_ON() when we couldn't find a VMID as alternative 
> if the process already has one but needs to flush it.
>
> But it's a long time ago that I last looked into this.
>
>>> But I certainly need to test this as well. It's possible that 1 VMID
>>> indeed works as expected.
>> I could run the test, if you describe the problematic scenario you have
>> in mind.
>
> Just try to set the available VMIDs to 1 and see if GFX/Compute and MM 
> submission work at the same time from multiple processes.
>
> A few UVD video playbacks at the same time should do the job.

I tested it on Fiji with first_kfd_vmid=2, running 3 instances of VLC 
playing a 1080p h.264 video using VDPAU. (For some reason VA-API is 
broken: vaDeriveImage: operation failed). Just to be sure it was really 
using UVD, I disabled HW acceleration in VLC and saw CPU usage increase. 
Everything seems to be working fine.

Regards,
   Felix


>
> Regards,
> Christian.
>
>>
>> Thanks,
>>    Felix
>>
>>
>>> Regards,
>>> Christian.
>>>
>>>> Thanks,
>>>>     Felix
>>>>
>>>>
>>>>> I don't think you can go below this.
>>>>>
>>>>> Regards,
>>>>> Christian.
>>>>>
>>>>>> Regards,
>>>>>>      Felix
>>>>>>
>>>>>>
>>>>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>>>>>> ---
>>>>>>>>      drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>>>>>      1 file changed, 8 insertions(+), 3 deletions(-)
>>>>>>>>
>>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>>>>>          /*
>>>>>>>>           * number of VMs
>>>>>>>>           * VMID 0 is reserved for System
>>>>>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>>>>>> -     * amdkfd will use VMIDs 8-15
>>>>>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>>>>>> +     * amdkfd will use VMIDs n..15
>>>>>>>> +     *
>>>>>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>>>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 
>>>>>>>> VMIDs
>>>>>>>> +     * for video processing.
>>>>>>>>           */
>>>>>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>>>>>> +    adev->vm_manager.first_kfd_vmid =
>>>>>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>>>>>            amdgpu_vm_manager_init(adev);
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus
  2020-06-25 20:39                 ` Felix Kuehling
@ 2020-06-26  6:57                   ` Christian König
  0 siblings, 0 replies; 12+ messages in thread
From: Christian König @ 2020-06-26  6:57 UTC (permalink / raw)
  To: Felix Kuehling, amd-gfx

Am 25.06.20 um 22:39 schrieb Felix Kuehling:
>
> On 2020-06-25 12:01 p.m., Christian König wrote:
>> Am 25.06.20 um 17:58 schrieb Felix Kuehling:
>>> Am 2020-06-25 um 11:50 a.m. schrieb Christian König:
>>>> Am 25.06.20 um 17:43 schrieb Felix Kuehling:
>>>>> Am 2020-06-25 um 11:38 a.m. schrieb Christian König:
>>>>>> Am 25.06.20 um 17:15 schrieb Felix Kuehling:
>>>>>>> Am 2020-06-25 um 4:19 a.m. schrieb Christian König:
>>>>>>>> Am 25.06.20 um 05:18 schrieb Felix Kuehling:
>>>>>>>>> When there is no graphics support, KFD can use more of the VMIDs.
>>>>>>>>> Graphics
>>>>>>>>> VMIDs are only used for video decoding/encoding and post 
>>>>>>>>> processing.
>>>>>>>>> With
>>>>>>>>> two VCE engines, there is no reason to reserve more than 2 
>>>>>>>>> VMIDs for
>>>>>>>>> that.
>>>>>>>> IIRC the expectation is that we still use the compute queues 
>>>>>>>> for post
>>>>>>>> processing and not the KFD.
>>>>>>>>
>>>>>>>> So we will need at least VMIDs for that as well.
>>>>>>> Correct. Post processing uses compute queues and VMIDs in the 
>>>>>>> GFXHUB.
>>>>>>> VCE uses VMIDs in the MMHUB. I believe in Mesa they use the same VM
>>>>>>> context. So can't they share the same VMIDs?
>>>>>> Ah! Good point. But we still need at least 3 VMIDs when VMID
>>>>>> reservation is active.
>>>>> I don't know anything about that VMID reservation feature. What is it
>>>>> used for? Who is using it? How many VMIDs can be reserved?
>>>>>
>>>>> If one VMID is reserved, there would still be one VMID left for video
>>>>> post processing. That's not ideal, but I don't think it would be 
>>>>> fatal.
>>>>> But is it a realistic use case that VMID reservation and ROCm+video
>>>>> processing would happen on the same system at the same time?
>>>> VMID reservation is used for debugging, and yes, there can only be one
>>>> reserved.
>>>>
>>>> But I think we need at least two for dynamic assignment or we might
>>>> run into a BUG_ON() while giving VMIDs to jobs.
>>> I don't see any BUGs or BUG_ONs in amdgpu_ids.c. What should I be
>>> looking out for?
>>
>> We used to have a BUG_ON() when we couldn't find a VMID as 
>> alternative if the process already has one but needs to flush it.
>>
>> But it's a long time ago that I last looked into this.
>>
>>>> But I certainly need to test this as well. It's possible that 1 VMID
>>>> indeed works as expected.
>>> I could run the test, if you describe the problematic scenario you have
>>> in mind.
>>
>> Just try to set the available VMIDs to 1 and see if GFX/Compute and 
>> MM submission work at the same time from multiple processes.
>>
>> A few UVD video playbacks at the same time should do the job.
>
> I tested it on Fiji with first_kfd_vmid=2, running 3 instances of VLC 
> playing a 1080p h.264 video using VDPAU. (For some reason VA-API is 
> broken: vaDeriveImage: operation failed). Just to be sure it was 
> really using UVD, I disabled HW acceleration in VLC and saw CPU usage 
> increase. Everything seems to be working fine.

In this case the patch is Reviewed-by: Christian König 
<christian.koenig@amd.com>

>
> Regards,
>   Felix
>
>
>>
>> Regards,
>> Christian.
>>
>>>
>>> Thanks,
>>>    Felix
>>>
>>>
>>>> Regards,
>>>> Christian.
>>>>
>>>>> Thanks,
>>>>>     Felix
>>>>>
>>>>>
>>>>>> I don't think you can go below this.
>>>>>>
>>>>>> Regards,
>>>>>> Christian.
>>>>>>
>>>>>>> Regards,
>>>>>>>      Felix
>>>>>>>
>>>>>>>
>>>>>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
>>>>>>>>> ---
>>>>>>>>>      drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++---
>>>>>>>>>      1 file changed, 8 insertions(+), 3 deletions(-)
>>>>>>>>>
>>>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>>> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>>> index 6e10b42c57e5..3470929e5b8e 100644
>>>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>>>>>>>>> @@ -1245,10 +1245,15 @@ static int gmc_v9_0_sw_init(void *handle)
>>>>>>>>>          /*
>>>>>>>>>           * number of VMs
>>>>>>>>>           * VMID 0 is reserved for System
>>>>>>>>> -     * amdgpu graphics/compute will use VMIDs 1-7
>>>>>>>>> -     * amdkfd will use VMIDs 8-15
>>>>>>>>> +     * amdgpu graphics/compute will use VMIDs 1..n-1
>>>>>>>>> +     * amdkfd will use VMIDs n..15
>>>>>>>>> +     *
>>>>>>>>> +     * The first KFD VMID is 8 for GPUs with graphics, 3 for
>>>>>>>>> +     * compute-only GPUs. On compute-only GPUs that leaves 2 
>>>>>>>>> VMIDs
>>>>>>>>> +     * for video processing.
>>>>>>>>>           */
>>>>>>>>> -    adev->vm_manager.first_kfd_vmid = 8;
>>>>>>>>> +    adev->vm_manager.first_kfd_vmid =
>>>>>>>>> +        adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
>>>>>>>>>            amdgpu_vm_manager_init(adev);
>>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, back to index

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-25  3:18 [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Felix Kuehling
2020-06-25  3:18 ` [PATCH 2/2] drm/amdgpu: Let KFD use more VMIDs on Arcturus Felix Kuehling
2020-06-25  8:19   ` Christian König
2020-06-25 15:15     ` Felix Kuehling
2020-06-25 15:38       ` Christian König
2020-06-25 15:43         ` Felix Kuehling
2020-06-25 15:50           ` Christian König
2020-06-25 15:58             ` Felix Kuehling
2020-06-25 16:01               ` Christian König
2020-06-25 20:39                 ` Felix Kuehling
2020-06-26  6:57                   ` Christian König
2020-06-25  8:18 ` [PATCH 1/2] drm/amdgpu: Clean up KFD VMID assignment Christian König

AMD-GFX Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/amd-gfx/0 amd-gfx/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 amd-gfx amd-gfx/ https://lore.kernel.org/amd-gfx \
		amd-gfx@lists.freedesktop.org
	public-inbox-index amd-gfx

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.freedesktop.lists.amd-gfx


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git