* [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
@ 2023-07-10 14:58 Matthew Brost
  2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
                   ` (6 more replies)
  0 siblings, 7 replies; 19+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

Update xe_exec_basic to create a NULL binding for the store data
address; stores to a NULL binding should simply be dropped.
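
A minimal sketch of the NULL bind the test issues (it mirrors the
helper call in the diff below; the VA and size values here are
illustrative, not required by the interface):

	/*
	 * Bind a VA range with no backing BO (handle 0, offset 0) and
	 * the NULL flag set, so GPU stores to the range are dropped
	 * rather than hitting memory.
	 */
	__xe_vm_bind_assert(fd, vm, bind_engine,
			    0, 0,		/* BO handle, offset */
			    0x301a0000, bo_size,
			    XE_VM_BIND_OP_MAP |
			    XE_VM_BIND_FLAG_ASYNC |
			    XE_VM_BIND_FLAG_NULL,
			    sync, 1, 0, 0);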

Acked-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
index af581c327..5624d31aa 100644
--- a/tests/xe/xe_exec_basic.c
+++ b/tests/xe/xe_exec_basic.c
@@ -28,6 +28,7 @@
 #define BIND_ENGINE	(0x1 << 4)
 #define DEFER_ALLOC	(0x1 << 5)
 #define DEFER_BIND	(0x1 << 6)
+#define SPARSE		(0x1 << 7)
 
 /**
  * SUBTEST: once-%s
@@ -70,6 +71,10 @@
  * @bindengine-userptr-rebind:		bind engine userptr rebind
  * @bindengine-userptr-invalidate:	bind engine userptr invalidate
  * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
+ * @null:				null
+ * @null-defer-mmap:			null defer mmap
+ * @null-defer-bind:			null defer bind
+ * @null-rebind:			null rebind
  */
 
 static void
@@ -86,6 +91,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		.syncs = to_user_pointer(sync),
 	};
 	uint64_t addr[MAX_N_ENGINES];
+	uint64_t sparse_addr[MAX_N_ENGINES];
 	uint32_t vm[MAX_N_ENGINES];
 	uint32_t engines[MAX_N_ENGINES];
 	uint32_t bind_engines[MAX_N_ENGINES];
@@ -110,8 +116,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 			xe_get_default_alignment(fd));
 
 	addr[0] = 0x1a0000;
-	for (i = 1; i < MAX_N_ENGINES; ++i)
+	sparse_addr[0] = 0x301a0000;
+	for (i = 1; i < MAX_N_ENGINES; ++i) {
 		addr[i] = addr[i - 1] + (0x1ull << 32);
+		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
+	}
 
 	if (flags & USERPTR) {
 #define	MAP_ADDRESS	0x00007fadeadbe000
@@ -160,6 +169,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
 						 to_user_pointer(data), addr[i],
 						 bo_size, sync, 1);
+		if (flags & SPARSE)
+			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
+					    0, 0, sparse_addr[i], bo_size,
+					    XE_VM_BIND_OP_MAP |
+					    XE_VM_BIND_FLAG_ASYNC |
+					    XE_VM_BIND_FLAG_NULL, sync,
+					    1, 0, 0);
 	}
 
 	if (flags & DEFER_BIND)
@@ -171,7 +187,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
 		uint64_t batch_addr = __addr + batch_offset;
 		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
-		uint64_t sdi_addr = __addr + sdi_offset;
+		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
+				     __addr) + sdi_offset;
 		int e = i % n_engines;
 
 		b = 0;
@@ -258,9 +275,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 					INT64_MAX, 0, NULL));
 	}
 
-	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
-	     i < n_execs; i++)
-		igt_assert_eq(data[i].data, 0xc0ffee);
+	if (!(flags & SPARSE)) {
+		for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
+		     i < n_execs; i++)
+			igt_assert_eq(data[i].data, 0xc0ffee);
+	}
 
 	for (i = 0; i < n_engines; i++) {
 		syncobj_destroy(fd, syncobjs[i]);
@@ -293,6 +312,10 @@ igt_main
 		{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
 		{ "userptr", USERPTR },
 		{ "rebind", REBIND },
+		{ "null", SPARSE },
+		{ "null-defer-mmap", SPARSE | DEFER_ALLOC },
+		{ "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
+		{ "null-rebind", SPARSE | REBIND },
 		{ "userptr-rebind", USERPTR | REBIND },
 		{ "userptr-invalidate", USERPTR | INVALIDATE },
 		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
-- 
2.34.1


* [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
@ 2023-07-10 14:58 ` Matthew Brost
  2023-07-13 15:24   ` Rodrigo Vivi
  2023-07-10 14:58 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

GPUVA added support for mmap style VM binds; let's test it.
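
A minimal sketch of the pattern under test (the calls mirror the diff
below; bo0/bo1 and the offset/n_pages names are illustrative
placeholders):

	/* Initial bind of the whole range to one BO ... */
	xe_vm_bind_async(fd, vm, 0, bo0, 0, addr, bo_size, sync, 1);

	/*
	 * ... then bind a sub-range of the same VA range to a second
	 * BO. Much like mmap(MAP_FIXED) over an existing mapping, the
	 * kernel must split the original VMA around the new bind.
	 */
	xe_vm_bind_async(fd, vm, 0, bo1, offset * page_size,
			 addr + offset * page_size,
			 n_pages * page_size, sync, 2);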

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 314 insertions(+), 36 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 04d6c3956..434bdb332 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
 	return NULL;
 }
 
-#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
-#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
-#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
+#define MAP_FLAG_USERPTR		(0x1 << 0)
+#define MAP_FLAG_INVALIDATE		(0x1 << 1)
+#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
 
 
 /**
@@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
 
-	if (flags & MUNMAP_FLAG_USERPTR) {
+	if (flags & MAP_FLAG_USERPTR) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	/* Do initial binds */
 	bind_size = (page_size * bo_n_pages) / n_binds;
 	for (i = 0; i < n_binds; ++i) {
-		if (flags & MUNMAP_FLAG_USERPTR)
+		if (flags & MAP_FLAG_USERPTR)
 			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
 						 bind_size, sync, 1);
 		else
@@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	 * cause a fault if a rebind occurs during munmap style VM unbind
 	 * (partial VMAs unbound).
 	 */
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		t.fd = fd;
 		t.vm = vm;
 #define PAGE_SIZE	4096
@@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 		data = map + i * page_size;
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1487,7 +1487,7 @@ try_again_after_invalidate:
 			igt_assert_eq(data->data, 0xc0ffee);
 		}
 	}
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		memset(map, 0, PAGE_SIZE / 2);
 		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
 	} else {
@@ -1498,7 +1498,7 @@ try_again_after_invalidate:
 	 * The munmap style VM unbind can create new VMAs, make sure those are
 	 * in the bookkeeping for another rebind after a userptr invalidate.
 	 */
-	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
+	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
 		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
 			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
 			    MAP_ANONYMOUS, -1, 0);
@@ -1509,7 +1509,7 @@ try_again_after_invalidate:
 	/* Confirm unbound region can be rebound */
 	syncobj_reset(fd, &sync[0].handle, 1);
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
-	if (flags & MUNMAP_FLAG_USERPTR)
+	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0,
 					 addr + unbind_n_page_offfset * page_size,
 					 addr + unbind_n_page_offfset * page_size,
@@ -1557,7 +1557,7 @@ try_again_after_invalidate:
 		igt_assert_eq(data->data, 0xc0ffee);
 	}
 
-	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
 		exit = 1;
 		pthread_join(t.thread, NULL);
 		pthread_barrier_destroy(&barrier);
@@ -1572,6 +1572,251 @@ try_again_after_invalidate:
 	xe_vm_destroy(fd, vm);
 }
 
+/**
+ * SUBTEST: mmap-style-bind-%s
+ * Description: Test mmap style bind with %arg[1]
+ * Run type: FULL
+ * TODO: change ``'Run type' == FULL`` to a better category
+ *
+ * arg[1]:
+ *
+ * @all:				all
+ * @one-partial:			one partial
+ * @either-side-partial:		either side partial
+ * @either-side-full:			either side full
+ * @either-side-partial-hammer:		either side partial hammer
+ * @end:				end
+ * @front:				front
+ * @many-all:				many all
+ * @many-either-side-partial:		many either side partial
+ * @many-either-side-partial-hammer:	many either side partial hammer
+ * @userptr-all:			userptr all
+ * @userptr-one-partial:		userptr one partial
+ * @userptr-either-side-partial:	userptr either side partial
+ * @userptr-either-side-full:		userptr either side full
+ */
+
+static void
+test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
+		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+		     int unbind_n_pages, unsigned int flags)
+{
+	struct drm_xe_sync sync[2] = {
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
+	};
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 2,
+		.syncs = to_user_pointer(sync),
+	};
+	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint32_t vm;
+	uint32_t engine;
+	size_t bo_size;
+	uint32_t bo0 = 0, bo1 = 0;
+	uint64_t bind_size;
+	uint64_t page_size = xe_get_default_alignment(fd);
+	struct {
+		uint32_t batch[16];
+		uint64_t pad;
+		uint32_t data;
+	} *data;
+	void *map0, *map1;
+	int i, b;
+	struct thread_data t;
+	pthread_barrier_t barrier;
+	int exit = 0;
+
+	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
+	bo_size = page_size * bo_n_pages;
+
+	if (flags & MAP_FLAG_USERPTR) {
+		map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
+			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
+			    MAP_ANONYMOUS, -1, 0);
+		map1 = mmap(from_user_pointer(addr + bo_size),
+			    bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
+			    MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+		igt_assert(map0 != MAP_FAILED);
+		igt_assert(map1 != MAP_FAILED);
+	} else {
+		bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map0 = xe_bo_map(fd, bo0, bo_size);
+		bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+		map1 = xe_bo_map(fd, bo1, bo_size);
+	}
+	memset(map0, 0, bo_size);
+	memset(map1, 0, bo_size);
+
+	engine = xe_engine_create(fd, vm, eci, 0);
+
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[1].handle = syncobj_create(fd, 0);
+
+	/* Do initial binds */
+	bind_size = (page_size * bo_n_pages) / n_binds;
+	for (i = 0; i < n_binds; ++i) {
+		if (flags & MAP_FLAG_USERPTR)
+			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
+						 bind_size, sync, 1);
+		else
+			xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
+					 addr, bind_size, sync, 1);
+		addr += bind_size;
+	}
+	addr = base_addr;
+
+	/*
+	 * Kick a thread to write the first page continuously to ensure we can't
+	 * cause a fault if a rebind occurs during mmap style VM bind
+	 * (partial VMAs replaced).
+	 */
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		t.fd = fd;
+		t.vm = vm;
+#define PAGE_SIZE	4096
+		t.addr = addr + PAGE_SIZE / 2;
+		t.eci = eci;
+		t.exit = &exit;
+		t.map = map0 + PAGE_SIZE / 2;
+		t.barrier = &barrier;
+		pthread_barrier_init(&barrier, NULL, 2);
+		pthread_create(&t.thread, 0, hammer_thread, &t);
+		pthread_barrier_wait(&barrier);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+		data = map0 + i * page_size;
+
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	/* Bind some of the pages to different BO / userptr */
+	syncobj_reset(fd, &sync[0].handle, 1);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
+	if (flags & MAP_FLAG_USERPTR)
+		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
+					 unbind_n_page_offfset * page_size,
+					 addr + unbind_n_page_offfset * page_size,
+					 unbind_n_pages * page_size, sync, 2);
+	else
+		xe_vm_bind_async(fd, vm, 0, bo1,
+				 unbind_n_page_offfset * page_size,
+				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_pages * page_size, sync, 2);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		data = map0 + i * page_size;
+		igt_assert_eq(data->data, 0xc0ffee);
+	}
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		memset(map0, 0, PAGE_SIZE / 2);
+		memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+	} else {
+		memset(map0, 0, bo_size);
+		memset(map1, 0, bo_size);
+	}
+
+	/* Verify we can use every page */
+	for (i = 0; i < n_binds; ++i) {
+		uint64_t batch_offset = (char *)&data->batch - (char *)data;
+		uint64_t batch_addr = addr + batch_offset;
+		uint64_t sdi_offset = (char *)&data->data - (char *)data;
+		uint64_t sdi_addr = addr + sdi_offset;
+
+		data = map0 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		data = map1 + i * page_size;
+		b = 0;
+		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+		data->batch[b++] = sdi_addr;
+		data->batch[b++] = sdi_addr >> 32;
+		data->batch[b++] = 0xc0ffee;
+		data->batch[b++] = MI_BATCH_BUFFER_END;
+		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+
+		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
+		if (i)
+			syncobj_reset(fd, &sync[1].handle, 1);
+		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
+
+		exec.engine_id = engine;
+		exec.address = batch_addr;
+		xe_exec(fd, &exec);
+
+		addr += page_size;
+	}
+	addr = base_addr;
+
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
+
+	/* Verify all pages written */
+	for (i = 0; i < n_binds; ++i) {
+		uint32_t result = 0;
+
+		data = map0 + i * page_size;
+		result |= data->data;
+
+		data = map1 + i * page_size;
+		result |= data->data;
+
+		igt_assert_eq(result, 0xc0ffee);
+	}
+
+	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
+		exit = 1;
+		pthread_join(t.thread, NULL);
+		pthread_barrier_destroy(&barrier);
+	}
+
+	syncobj_destroy(fd, sync[0].handle);
+	syncobj_destroy(fd, sync[1].handle);
+	xe_engine_destroy(fd, engine);
+	munmap(map0, bo_size);
+	munmap(map1, bo_size);
+	if (bo0)
+		gem_close(fd, bo0);
+	if (bo1)
+		gem_close(fd, bo1);
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
@@ -1584,55 +1829,74 @@ igt_main
 		int unbind_n_page_offfset;
 		int unbind_n_pages;
 		unsigned int flags;
-	} sections[] = {
+	} munmap_sections[] = {
 		{ "all", 4, 2, 0, 4, 0 },
 		{ "one-partial", 4, 1, 1, 2, 0 },
 		{ "either-side-partial", 4, 2, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
 		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
 		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
 		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
-			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
+			MAP_FLAG_HAMMER_FIRST_PAGE },
 		{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
 		{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
 		{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
-		{ "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
-		{ "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
 		{ "userptr-either-side-partial", 4, 2, 1, 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR },
-		{ "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
-		{ "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
+		{ "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
+		{ "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
 		{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR },
+			MAP_FLAG_USERPTR },
 		{ "userptr-inval-either-side-full", 4, 4, 1, 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
-		{ "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
-			MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
+		{ "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
+			MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
-			4 * 8 - 2, MUNMAP_FLAG_USERPTR |
-				MUNMAP_FLAG_INVALIDATE },
+			4 * 8 - 2, MAP_FLAG_USERPTR |
+				MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
-			2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
 		{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
-			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
+			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
+		{ NULL },
+	};
+	const struct section mmap_sections[] = {
+		{ "all", 4, 2, 0, 4, 0 },
+		{ "one-partial", 4, 1, 1, 2, 0 },
+		{ "either-side-partial", 4, 2, 1, 2, 0 },
+		{ "either-side-full", 4, 4, 1, 2, 0 },
+		{ "either-side-partial-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "end", 4, 2, 0, 3, 0 },
+		{ "front", 4, 2, 1, 3, 0 },
+		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
+		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
+		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
+		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
+		{ "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
 		{ NULL },
 	};
 
@@ -1839,7 +2103,7 @@ igt_main
 			break;
 		}
 
-	for (const struct section *s = sections; s->name; s++) {
+	for (const struct section *s = munmap_sections; s->name; s++) {
 		igt_subtest_f("munmap-style-unbind-%s", s->name) {
 			igt_require_f(hwe_non_copy,
 				      "Requires non-copy engine to run\n");
@@ -1853,6 +2117,20 @@ igt_main
 		}
 	}
 
+	for (const struct section *s = mmap_sections; s->name; s++) {
+		igt_subtest_f("mmap-style-bind-%s", s->name) {
+			igt_require_f(hwe_non_copy,
+				      "Requires non-copy engine to run\n");
+
+			test_mmap_style_bind(fd, hwe_non_copy,
+					     s->bo_n_pages,
+					     s->n_binds,
+					     s->unbind_n_page_offfset,
+					     s->unbind_n_pages,
+					     s->flags);
+		}
+	}
+
 	igt_fixture
 		drm_close_driver(fd);
 }
-- 
2.34.1


* [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
  2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
@ 2023-07-10 14:58 ` Matthew Brost
  2023-07-13 15:30   ` Rodrigo Vivi
  2023-07-10 14:58 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

Splitting large pages involves using dma-resv slots for ordering; make
sure this works.
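
A worked example of the scaling below: with the default 4 KiB
alignment, n_page_per_2mb = 0x200000 / 0x1000 = 512, so a section
listed as 4 pages becomes 2048 small pages (four 2 MiB huge pages).
Unless MAP_FLAG_LARGE_PAGE_NO_SPLIT also scales the unbind offset to a
2 MiB boundary, the partial operation lands inside a huge page and
forces a split.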

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 81 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 21 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 434bdb332..5fd511f0b 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -1252,7 +1252,8 @@ static void *hammer_thread(void *tdata)
 #define MAP_FLAG_USERPTR		(0x1 << 0)
 #define MAP_FLAG_INVALIDATE		(0x1 << 1)
 #define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
-
+#define MAP_FLAG_LARGE_PAGE		(0x1 << 3)
+#define MAP_FLAG_LARGE_PAGE_NO_SPLIT	(0x1 << 4)
 
 /**
  * SUBTEST: munmap-style-unbind-%s
@@ -1305,12 +1306,16 @@ static void *hammer_thread(void *tdata)
  *					userptr inval many either side full
  * @userptr-inval-many-end:		userptr inval many end
  * @userptr-inval-many-front:		userptr inval many front
+ * @either-side-partial-large-page-hammer:
+ *					either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ *					either side partial split page hammer
  */
 
 static void
 test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 			 int bo_n_pages, int n_binds,
-			 int unbind_n_page_offfset, int unbind_n_pages,
+			 int unbind_n_page_offset, int unbind_n_pages,
 			 unsigned int flags)
 {
 	struct drm_xe_sync sync[2] = {
@@ -1322,7 +1327,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 		.num_syncs = 2,
 		.syncs = to_user_pointer(sync),
 	};
-	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
 	uint32_t vm;
 	uint32_t engine;
 	size_t bo_size;
@@ -1340,6 +1345,14 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	struct thread_data t;
 	pthread_barrier_t barrier;
 	int exit = 0;
+	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+	if (flags & MAP_FLAG_LARGE_PAGE) {
+		bo_n_pages *= n_page_per_2mb;
+		unbind_n_pages *= n_page_per_2mb;
+		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+			unbind_n_page_offset *= n_page_per_2mb;
+	}
 
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
@@ -1426,7 +1439,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
 	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 	xe_vm_unbind_async(fd, vm, 0, 0,
-			   addr + unbind_n_page_offfset * page_size,
+			   addr + unbind_n_page_offset * page_size,
 			   unbind_n_pages * page_size, sync, 2);
 
 	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
@@ -1455,8 +1468,8 @@ try_again_after_invalidate:
 		data = map + i * page_size;
 		addr += page_size;
 
-		if (i < unbind_n_page_offfset ||
-		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+		if (i < unbind_n_page_offset ||
+		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
 			b = 0;
 			data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
 			data->batch[b++] = sdi_addr;
@@ -1481,8 +1494,8 @@ try_again_after_invalidate:
 
 	/* Verify all pages still bound written */
 	for (i = 0; i < n_binds; ++i) {
-		if (i < unbind_n_page_offfset ||
-		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
+		if (i < unbind_n_page_offset ||
+		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
 			data = map + i * page_size;
 			igt_assert_eq(data->data, 0xc0ffee);
 		}
@@ -1511,13 +1524,13 @@ try_again_after_invalidate:
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
 	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0,
-					 addr + unbind_n_page_offfset * page_size,
-					 addr + unbind_n_page_offfset * page_size,
+					 addr + unbind_n_page_offset * page_size,
+					 addr + unbind_n_page_offset * page_size,
 					 unbind_n_pages * page_size, sync, 1);
 	else
 		xe_vm_bind_async(fd, vm, 0, bo,
-				 unbind_n_page_offfset * page_size,
-				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_page_offset * page_size,
+				 addr + unbind_n_page_offset * page_size,
 				 unbind_n_pages * page_size, sync, 1);
 
 	/* Verify we can use every page */
@@ -1594,11 +1607,15 @@ try_again_after_invalidate:
  * @userptr-one-partial:		userptr one partial
  * @userptr-either-side-partial:	userptr either side partial
  * @userptr-either-side-full:		userptr either side full
+ * @either-side-partial-large-page-hammer:
+ *					either side partial large page hammer
+ * @either-side-partial-split-page-hammer:
+ *					either side partial split page hammer
  */
 
 static void
 test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
-		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
+		     int bo_n_pages, int n_binds, int unbind_n_page_offset,
 		     int unbind_n_pages, unsigned int flags)
 {
 	struct drm_xe_sync sync[2] = {
@@ -1610,7 +1627,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 		.num_syncs = 2,
 		.syncs = to_user_pointer(sync),
 	};
-	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
 	uint32_t vm;
 	uint32_t engine;
 	size_t bo_size;
@@ -1627,6 +1644,14 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 	struct thread_data t;
 	pthread_barrier_t barrier;
 	int exit = 0;
+	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
+
+	if (flags & MAP_FLAG_LARGE_PAGE) {
+		bo_n_pages *= n_page_per_2mb;
+		unbind_n_pages *= n_page_per_2mb;
+		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
+			unbind_n_page_offset *= n_page_per_2mb;
+	}
 
 	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
 	bo_size = page_size * bo_n_pages;
@@ -1721,13 +1746,13 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
 	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 	if (flags & MAP_FLAG_USERPTR)
 		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
-					 unbind_n_page_offfset * page_size,
-					 addr + unbind_n_page_offfset * page_size,
+					 unbind_n_page_offset * page_size,
+					 addr + unbind_n_page_offset * page_size,
 					 unbind_n_pages * page_size, sync, 2);
 	else
 		xe_vm_bind_async(fd, vm, 0, bo1,
-				 unbind_n_page_offfset * page_size,
-				 addr + unbind_n_page_offfset * page_size,
+				 unbind_n_page_offset * page_size,
+				 addr + unbind_n_page_offset * page_size,
 				 unbind_n_pages * page_size, sync, 2);
 	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
 	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
@@ -1826,7 +1851,7 @@ igt_main
 		const char *name;
 		int bo_n_pages;
 		int n_binds;
-		int unbind_n_page_offfset;
+		int unbind_n_page_offset;
 		int unbind_n_pages;
 		unsigned int flags;
 	} munmap_sections[] = {
@@ -1835,6 +1860,13 @@ igt_main
 		{ "either-side-partial", 4, 2, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
 			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE },
+		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE |
+			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
@@ -1887,6 +1919,13 @@ igt_main
 		{ "either-side-full", 4, 4, 1, 2, 0 },
 		{ "either-side-partial-hammer", 4, 2, 1, 2,
 			MAP_FLAG_HAMMER_FIRST_PAGE },
+		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE },
+		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
+			MAP_FLAG_HAMMER_FIRST_PAGE |
+			MAP_FLAG_LARGE_PAGE |
+			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
 		{ "end", 4, 2, 0, 3, 0 },
 		{ "front", 4, 2, 1, 3, 0 },
 		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
@@ -2111,7 +2150,7 @@ igt_main
 			test_munmap_style_unbind(fd, hwe_non_copy,
 						 s->bo_n_pages,
 						 s->n_binds,
-						 s->unbind_n_page_offfset,
+						 s->unbind_n_page_offset,
 						 s->unbind_n_pages,
 						 s->flags);
 		}
@@ -2125,7 +2164,7 @@ igt_main
 			test_mmap_style_bind(fd, hwe_non_copy,
 					     s->bo_n_pages,
 					     s->n_binds,
-					     s->unbind_n_page_offfset,
+					     s->unbind_n_page_offset,
 					     s->unbind_n_pages,
 					     s->flags);
 		}
-- 
2.34.1


* [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
  2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
  2023-07-10 14:58 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
@ 2023-07-10 14:58 ` Matthew Brost
  2023-07-13 15:32   ` Rodrigo Vivi
  2023-07-10 14:58 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

Exercises the maple tree dep tracker logic.
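
The hunk below adds a single async unbind-all of the BO once the
per-engine binds have completed; since the BO is mapped at multiple
addresses at that point, one operation has to walk and fence several
tracked ranges at once (the tracker internals themselves are not
visible in this diff).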

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 5fd511f0b..36cd80357 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 				NULL));
 	igt_assert_eq(data[0].data, 0xc0ffee);
 
+	syncobj_destroy(fd, sync[0].handle);
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+
 	syncobj_destroy(fd, sync[0].handle);
 	for (i = 0; i < N_ENGINES; i++) {
 		syncobj_destroy(fd, syncobjs[i]);
-- 
2.34.1


* [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (2 preceding siblings ...)
  2023-07-10 14:58 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
@ 2023-07-10 14:58 ` Matthew Brost
  2023-07-13 15:34   ` Rodrigo Vivi
  2023-07-10 16:09 ` [igt-dev] ✗ Fi.CI.BAT: failure for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-10 14:58 UTC (permalink / raw)
  To: igt-dev

Verify bind engines can't race and corrupt page tables.
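
How the conflict is provoked, as far as the address arithmetic in the
hunk below shows: with CONFLICT set, each bind steps the VA by 2 MiB
(0x1 << 21) instead of bo_size, so binds issued from different bind
engines land close enough to share higher-level page-table pages. The
test then expects the second bind + write to stay blocked behind the
first engine's in-flight update instead of racing it.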

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 7 deletions(-)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 36cd80357..315cb7009 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -689,10 +689,18 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
  * Description: Test independent bind engines
  * Functionality: bind engines
  * Run type: BAT
+ *
+ * SUBTEST: bind-engines-conflict
+ * Description: Test conflict bind engines
+ * Functionality: bind engines
+ * Run type: BAT
  */
 
+#define CONFLICT	(0x1 << 0)
+
 static void
-test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
+test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
+			      unsigned int flags)
 {
 	uint32_t vm;
 	uint64_t addr = 0x1a0000;
@@ -759,7 +767,7 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 			xe_spin_wait_started(&data[i].spin);
 
 			/* Do bind to 1st engine blocked on cork */
-			addr += bo_size;
+			addr += (flags & CONFLICT) ? (0x1 << 21) : bo_size;
 			sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
 			sync[1].handle = syncobjs[e];
 			xe_vm_bind_async(fd, vm, bind_engines[e], bo, 0, addr,
@@ -795,10 +803,20 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 		xe_exec(fd, &exec);
 	}
 
-	/* Verify initial bind, bind + write to 2nd engine done */
-	igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0, NULL));
-	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
-	igt_assert_eq(data[1].data, 0xc0ffee);
+	if (!(flags & CONFLICT)) {
+		/* Verify initial bind, bind + write to 2nd engine done */
+		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
+					NULL));
+		igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0,
+					NULL));
+		igt_assert_eq(data[1].data, 0xc0ffee);
+	} else {
+		/* Let jobs run for a bit */
+		usleep(100000);
+		/* bind + write to 2nd engine waiting */
+		igt_assert(!syncobj_wait(fd, &syncobjs[1], 1, 1, 0, NULL));
+		igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
+	}
 
 	/* Verify bind + write to 1st engine still inflight */
 	igt_assert(!syncobj_wait(fd, &syncobjs[0], 1, 1, 0, NULL));
@@ -811,6 +829,13 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 				NULL));
 	igt_assert_eq(data[0].data, 0xc0ffee);
 
+	if (flags & CONFLICT) {
+		/* Verify bind + write to 2nd engine done */
+		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
+					NULL));
+		igt_assert_eq(data[1].data, 0xc0ffee);
+	}
+
 	syncobj_destroy(fd, sync[0].handle);
 	sync[0].handle = syncobj_create(fd, 0);
 	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
@@ -2001,7 +2026,11 @@ igt_main
 
 	igt_subtest("bind-engines-independent")
 		xe_for_each_hw_engine(fd, hwe)
-			test_bind_engines_independent(fd, hwe);
+			test_bind_engines_independent(fd, hwe, 0);
+
+	igt_subtest("bind-engines-conflict")
+		xe_for_each_hw_engine(fd, hwe)
+			test_bind_engines_independent(fd, hwe, CONFLICT);
 
 	igt_subtest("bind-array-twice")
 		xe_for_each_hw_engine(fd, hwe)
-- 
2.34.1


* [igt-dev] ✗ Fi.CI.BAT: failure for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (3 preceding siblings ...)
  2023-07-10 14:58 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
@ 2023-07-10 16:09 ` Patchwork
  2023-07-10 16:46 ` [igt-dev] ○ CI.xeBAT: info " Patchwork
  2023-07-13 15:19 ` [igt-dev] [PATCH 1/5] " Rodrigo Vivi
  6 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2023-07-10 16:09 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev


== Series Details ==

Series: series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
URL   : https://patchwork.freedesktop.org/series/120471/
State : failure

== Summary ==

CI Bug Log - changes from IGT_7378 -> IGTPW_9377
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with IGTPW_9377 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_9377, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/index.html

Participating hosts (41 -> 40)
------------------------------

  Additional (1): bat-rpls-2 
  Missing    (2): fi-kbl-soraka fi-snb-2520m 

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in IGTPW_9377:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live@gt_heartbeat:
    - bat-adls-5:         [PASS][1] -> [DMESG-FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-adls-5/igt@i915_selftest@live@gt_heartbeat.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-adls-5/igt@i915_selftest@live@gt_heartbeat.html

  
#### Warnings ####

  * igt@i915_pm_rpm@basic-pci-d3-state:
    - fi-cfl-8700k:       [FAIL][3] ([i915#7940]) -> [FAIL][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/fi-cfl-8700k/igt@i915_pm_rpm@basic-pci-d3-state.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/fi-cfl-8700k/igt@i915_pm_rpm@basic-pci-d3-state.html

  
Known issues
------------

  Here are the changes found in IGTPW_9377 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@debugfs_test@basic-hwmon:
    - bat-rpls-2:         NOTRUN -> [SKIP][5] ([i915#7456])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@debugfs_test@basic-hwmon.html

  * igt@fbdev@info:
    - bat-rpls-2:         NOTRUN -> [SKIP][6] ([i915#1849] / [i915#2582])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@fbdev@info.html

  * igt@fbdev@read:
    - bat-rpls-2:         NOTRUN -> [SKIP][7] ([i915#2582]) +3 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@fbdev@read.html

  * igt@gem_lmem_swapping@parallel-random-engines:
    - bat-jsl-1:          NOTRUN -> [SKIP][8] ([i915#4613]) +3 similar issues
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-jsl-1/igt@gem_lmem_swapping@parallel-random-engines.html

  * igt@gem_lmem_swapping@verify-random:
    - bat-rpls-2:         NOTRUN -> [SKIP][9] ([i915#4613]) +3 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@gem_lmem_swapping@verify-random.html

  * igt@gem_tiled_pread_basic:
    - bat-rpls-2:         NOTRUN -> [SKIP][10] ([i915#3282])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@gem_tiled_pread_basic.html

  * igt@i915_pm_backlight@basic-brightness:
    - bat-rpls-2:         NOTRUN -> [SKIP][11] ([i915#7561])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@i915_pm_backlight@basic-brightness.html

  * igt@i915_pm_rpm@basic-rte:
    - fi-skl-guc:         [PASS][12] -> [FAIL][13] ([i915#7940])
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/fi-skl-guc/igt@i915_pm_rpm@basic-rte.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/fi-skl-guc/igt@i915_pm_rpm@basic-rte.html

  * igt@i915_pm_rps@basic-api:
    - bat-rpls-2:         NOTRUN -> [SKIP][14] ([i915#6621])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@i915_pm_rps@basic-api.html

  * igt@i915_selftest@live@gt_mocs:
    - bat-mtlp-8:         [PASS][15] -> [DMESG-FAIL][16] ([i915#7059])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-mtlp-8/igt@i915_selftest@live@gt_mocs.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-mtlp-8/igt@i915_selftest@live@gt_mocs.html
    - bat-rpls-2:         NOTRUN -> [DMESG-FAIL][17] ([i915#7059])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@i915_selftest@live@gt_mocs.html

  * igt@i915_selftest@live@gt_pm:
    - bat-rpls-2:         NOTRUN -> [DMESG-FAIL][18] ([i915#4258] / [i915#7913])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@i915_selftest@live@gt_pm.html

  * igt@i915_selftest@live@slpc:
    - bat-rpls-2:         NOTRUN -> [DMESG-WARN][19] ([i915#6367])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@i915_selftest@live@slpc.html

  * igt@kms_busy@basic:
    - bat-rpls-2:         NOTRUN -> [SKIP][20] ([i915#1845]) +15 similar issues
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_busy@basic.html

  * igt@kms_chamelium_edid@hdmi-edid-read:
    - bat-rpls-2:         NOTRUN -> [SKIP][21] ([i915#7828]) +8 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_chamelium_edid@hdmi-edid-read.html

  * igt@kms_chamelium_hpd@common-hpd-after-suspend:
    - bat-adlm-1:         NOTRUN -> [SKIP][22] ([i915#7828])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-adlm-1/igt@kms_chamelium_hpd@common-hpd-after-suspend.html
    - bat-jsl-1:          NOTRUN -> [SKIP][23] ([i915#7828])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-jsl-1/igt@kms_chamelium_hpd@common-hpd-after-suspend.html

  * igt@kms_flip@basic-flip-vs-dpms:
    - bat-rpls-2:         NOTRUN -> [SKIP][24] ([i915#3637]) +3 similar issues
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_flip@basic-flip-vs-dpms.html

  * igt@kms_force_connector_basic@force-load-detect:
    - bat-rpls-2:         NOTRUN -> [SKIP][25] ([fdo#109285])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_force_connector_basic@force-load-detect.html

  * igt@kms_frontbuffer_tracking@basic:
    - bat-rpls-2:         NOTRUN -> [SKIP][26] ([i915#1849])
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_frontbuffer_tracking@basic.html

  * igt@kms_pipe_crc_basic@suspend-read-crc:
    - bat-adlm-1:         NOTRUN -> [SKIP][27] ([i915#1845])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-adlm-1/igt@kms_pipe_crc_basic@suspend-read-crc.html

  * igt@kms_psr@sprite_plane_onoff:
    - bat-rpls-2:         NOTRUN -> [SKIP][28] ([i915#1072]) +3 similar issues
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_psr@sprite_plane_onoff.html

  * igt@kms_setmode@basic-clone-single-crtc:
    - bat-rpls-2:         NOTRUN -> [SKIP][29] ([i915#3555])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@kms_setmode@basic-clone-single-crtc.html

  * igt@prime_vgem@basic-fence-flip:
    - bat-rpls-2:         NOTRUN -> [SKIP][30] ([fdo#109295] / [i915#1845] / [i915#3708])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@prime_vgem@basic-fence-flip.html

  * igt@prime_vgem@basic-write:
    - bat-rpls-2:         NOTRUN -> [SKIP][31] ([fdo#109295] / [i915#3708]) +2 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rpls-2/igt@prime_vgem@basic-write.html

  
#### Possible fixes ####

  * igt@i915_module_load@reload:
    - bat-jsl-1:          [INCOMPLETE][32] -> [PASS][33]
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-jsl-1/igt@i915_module_load@reload.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-jsl-1/igt@i915_module_load@reload.html

  * igt@i915_pm_rpm@module-reload:
    - fi-kbl-7567u:       [FAIL][34] ([i915#7940]) -> [PASS][35]
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/fi-kbl-7567u/igt@i915_pm_rpm@module-reload.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/fi-kbl-7567u/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live@requests:
    - bat-mtlp-6:         [DMESG-FAIL][36] ([i915#7269]) -> [PASS][37]
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-mtlp-6/igt@i915_selftest@live@requests.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-mtlp-6/igt@i915_selftest@live@requests.html

  * igt@i915_selftest@live@slpc:
    - bat-mtlp-8:         [DMESG-WARN][38] ([i915#6367]) -> [PASS][39]
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-mtlp-8/igt@i915_selftest@live@slpc.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-mtlp-8/igt@i915_selftest@live@slpc.html

  * igt@i915_selftest@live@workarounds:
    - bat-adlm-1:         [INCOMPLETE][40] ([i915#4983] / [i915#7677]) -> [PASS][41]
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-adlm-1/igt@i915_selftest@live@workarounds.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-adlm-1/igt@i915_selftest@live@workarounds.html

  
#### Warnings ####

  * igt@kms_psr@primary_page_flip:
    - bat-rplp-1:         [SKIP][42] ([i915#1072]) -> [ABORT][43] ([i915#8442] / [i915#8668])
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_7378/bat-rplp-1/igt@kms_psr@primary_page_flip.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/bat-rplp-1/igt@kms_psr@primary_page_flip.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [fdo#109295]: https://bugs.freedesktop.org/show_bug.cgi?id=109295
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1845]: https://gitlab.freedesktop.org/drm/intel/issues/1845
  [i915#1849]: https://gitlab.freedesktop.org/drm/intel/issues/1849
  [i915#2582]: https://gitlab.freedesktop.org/drm/intel/issues/2582
  [i915#3282]: https://gitlab.freedesktop.org/drm/intel/issues/3282
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#3637]: https://gitlab.freedesktop.org/drm/intel/issues/3637
  [i915#3708]: https://gitlab.freedesktop.org/drm/intel/issues/3708
  [i915#4258]: https://gitlab.freedesktop.org/drm/intel/issues/4258
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#4983]: https://gitlab.freedesktop.org/drm/intel/issues/4983
  [i915#6367]: https://gitlab.freedesktop.org/drm/intel/issues/6367
  [i915#6621]: https://gitlab.freedesktop.org/drm/intel/issues/6621
  [i915#7059]: https://gitlab.freedesktop.org/drm/intel/issues/7059
  [i915#7269]: https://gitlab.freedesktop.org/drm/intel/issues/7269
  [i915#7456]: https://gitlab.freedesktop.org/drm/intel/issues/7456
  [i915#7561]: https://gitlab.freedesktop.org/drm/intel/issues/7561
  [i915#7677]: https://gitlab.freedesktop.org/drm/intel/issues/7677
  [i915#7828]: https://gitlab.freedesktop.org/drm/intel/issues/7828
  [i915#7913]: https://gitlab.freedesktop.org/drm/intel/issues/7913
  [i915#7940]: https://gitlab.freedesktop.org/drm/intel/issues/7940
  [i915#8442]: https://gitlab.freedesktop.org/drm/intel/issues/8442
  [i915#8668]: https://gitlab.freedesktop.org/drm/intel/issues/8668


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_7378 -> IGTPW_9377

  CI-20190529: 20190529
  CI_DRM_13364: 91010bd5ed0a15e689f0fcc3817489bd02433884 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_9377: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/index.html
  IGT_7378: e78963553e05a2413cf735824517547b2bb19936 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git


Testlist changes
----------------

+igt@xe_exec_basic@many-engines-many-vm-null
+igt@xe_exec_basic@many-engines-many-vm-null-defer-bind
+igt@xe_exec_basic@many-engines-many-vm-null-defer-mmap
+igt@xe_exec_basic@many-engines-many-vm-null-rebind
+igt@xe_exec_basic@many-engines-null
+igt@xe_exec_basic@many-engines-null-defer-bind
+igt@xe_exec_basic@many-engines-null-defer-mmap
+igt@xe_exec_basic@many-engines-null-rebind
+igt@xe_exec_basic@many-null
+igt@xe_exec_basic@many-null-defer-bind
+igt@xe_exec_basic@many-null-defer-mmap
+igt@xe_exec_basic@many-null-rebind
+igt@xe_exec_basic@no-exec-null
+igt@xe_exec_basic@no-exec-null-defer-bind
+igt@xe_exec_basic@no-exec-null-defer-mmap
+igt@xe_exec_basic@no-exec-null-rebind
+igt@xe_exec_basic@once-null
+igt@xe_exec_basic@once-null-defer-bind
+igt@xe_exec_basic@once-null-defer-mmap
+igt@xe_exec_basic@once-null-rebind
+igt@xe_exec_basic@twice-null
+igt@xe_exec_basic@twice-null-defer-bind
+igt@xe_exec_basic@twice-null-defer-mmap
+igt@xe_exec_basic@twice-null-rebind
+igt@xe_vm@bind-engines-conflict
+igt@xe_vm@mmap-style-bind-all
+igt@xe_vm@mmap-style-bind-either-side-full
+igt@xe_vm@mmap-style-bind-either-side-partial
+igt@xe_vm@mmap-style-bind-either-side-partial-hammer
+igt@xe_vm@mmap-style-bind-either-side-partial-large-page-hammer
+igt@xe_vm@mmap-style-bind-either-side-partial-split-page-hammer
+igt@xe_vm@mmap-style-bind-end
+igt@xe_vm@mmap-style-bind-front
+igt@xe_vm@mmap-style-bind-many-all
+igt@xe_vm@mmap-style-bind-many-either-side-partial
+igt@xe_vm@mmap-style-bind-many-either-side-partial-hammer
+igt@xe_vm@mmap-style-bind-one-partial
+igt@xe_vm@mmap-style-bind-userptr-all
+igt@xe_vm@mmap-style-bind-userptr-either-side-full
+igt@xe_vm@mmap-style-bind-userptr-either-side-partial
+igt@xe_vm@mmap-style-bind-userptr-one-partial
+igt@xe_vm@munmap-style-unbind-either-side-partial-large-page-hammer
+igt@xe_vm@munmap-style-unbind-either-side-partial-split-page-hammer

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_9377/index.html



* [igt-dev] ○ CI.xeBAT: info for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (4 preceding siblings ...)
  2023-07-10 16:09 ` [igt-dev] ✗ Fi.CI.BAT: failure for series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Patchwork
@ 2023-07-10 16:46 ` Patchwork
  2023-07-13 15:19 ` [igt-dev] [PATCH 1/5] " Rodrigo Vivi
  6 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2023-07-10 16:46 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev


== Series Details ==

Series: series starting with [1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
URL   : https://patchwork.freedesktop.org/series/120471/
State : info

== Summary ==

Participating hosts:
"bat-pvc-2n""bat-atsm-2n""bat-dg2-oem2n""bat-adlp-7n"Missing hosts results[0]:
Results: [IGTPW_9377](https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_9377/index.html)





* Re: [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings
  2023-07-10 14:58 [igt-dev] [PATCH 1/5] xe/xe_exec_basic: Add sections to test NULL (sparse) bindings Matthew Brost
                   ` (5 preceding siblings ...)
  2023-07-10 16:46 ` [igt-dev] ○ CI.xeBAT: info " Patchwork
@ 2023-07-13 15:19 ` Rodrigo Vivi
  2023-07-14 18:00   ` Matthew Brost
  6 siblings, 1 reply; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:19 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:52AM -0700, Matthew Brost wrote:
> Update xe_exec_basic to create a NULL binding for the store data
> address; stores to a NULL binding should simply be dropped.
> 
> Acked-by: Mauro Carvalho Chehab <mchehab@kernel.org>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
>  1 file changed, 28 insertions(+), 5 deletions(-)
> 
> diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
> index af581c327..5624d31aa 100644
> --- a/tests/xe/xe_exec_basic.c
> +++ b/tests/xe/xe_exec_basic.c
> @@ -28,6 +28,7 @@
>  #define BIND_ENGINE	(0x1 << 4)
>  #define DEFER_ALLOC	(0x1 << 5)
>  #define DEFER_BIND	(0x1 << 6)
> +#define SPARSE		(0x1 << 7)
>  
>  /**
>   * SUBTEST: once-%s
> @@ -70,6 +71,10 @@
>   * @bindengine-userptr-rebind:		bind engine userptr rebind
>   * @bindengine-userptr-invalidate:	bind engine userptr invalidate
>   * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
> + * @null:				null
> + * @null-defer-mmap:			null defer mmap
> + * @null-defer-bind:			null defer bind
> + * @null-rebind:			null rebind
>   */
>  
>  static void
> @@ -86,6 +91,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  		.syncs = to_user_pointer(sync),
>  	};
>  	uint64_t addr[MAX_N_ENGINES];
> +	uint64_t sparse_addr[MAX_N_ENGINES];
>  	uint32_t vm[MAX_N_ENGINES];
>  	uint32_t engines[MAX_N_ENGINES];
>  	uint32_t bind_engines[MAX_N_ENGINES];
> @@ -110,8 +116,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  			xe_get_default_alignment(fd));
>  
>  	addr[0] = 0x1a0000;
> -	for (i = 1; i < MAX_N_ENGINES; ++i)
> +	sparse_addr[0] = 0x301a0000;

Why 0x301a0000?
(Although I never understood where the 0x1a0000 came from to start with...)

> +	for (i = 1; i < MAX_N_ENGINES; ++i) {
>  		addr[i] = addr[i - 1] + (0x1ull << 32);
> +		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
> +	}
>  
>  	if (flags & USERPTR) {
>  #define	MAP_ADDRESS	0x00007fadeadbe000
> @@ -160,6 +169,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
>  						 to_user_pointer(data), addr[i],
>  						 bo_size, sync, 1);
> +		if (flags & SPARSE)
> +			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
> +					    0, 0, sparse_addr[i], bo_size,
> +					    XE_VM_BIND_OP_MAP |
> +					    XE_VM_BIND_FLAG_ASYNC |
> +					    XE_VM_BIND_FLAG_NULL, sync,
> +					    1, 0, 0);
>  	}
>  
>  	if (flags & DEFER_BIND)
> @@ -171,7 +187,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
>  		uint64_t batch_addr = __addr + batch_offset;
>  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> -		uint64_t sdi_addr = __addr + sdi_offset;
> +		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
> +				     __addr) + sdi_offset;
>  		int e = i % n_engines;
>  
>  		b = 0;
> @@ -258,9 +275,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  					INT64_MAX, 0, NULL));
>  	}
>  
> -	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> -	     i < n_execs; i++)
> -		igt_assert_eq(data[i].data, 0xc0ffee);
> +	if (!(flags & SPARSE)) {
> +		for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> +		     i < n_execs; i++)
> +			igt_assert_eq(data[i].data, 0xc0ffee);
> +	}

As far as I could see, the basic exec also happens here, and this null bind
for sparse is an extra one, so why not check the correctness of that basic anyway?

oh, and if we check the basic we also need to add 'basic-' in the subtest
names below...

>  
>  	for (i = 0; i < n_engines; i++) {
>  		syncobj_destroy(fd, syncobjs[i]);
> @@ -293,6 +312,10 @@ igt_main
>  		{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
>  		{ "userptr", USERPTR },
>  		{ "rebind", REBIND },
> +		{ "null", SPARSE },

and talking about the naming... is the XE_VM_BIND_FLAG_NULL only used for sparse?
Is any bind for sparse required to use the NULL?
It looks to me that we have a strange mapping name here and we
should stick to either
{ "sparse", SPARSE },
{ "null", NULL },

but maybe it is just me missing something here...

> +		{ "null-defer-mmap", SPARSE | DEFER_ALLOC },
> +		{ "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
> +		{ "null-rebind", SPARSE | REBIND },
>  		{ "userptr-rebind", USERPTR | REBIND },
>  		{ "userptr-invalidate", USERPTR | INVALIDATE },
>  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> -- 
> 2.34.1
> 


* Re: [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-10 14:58 ` [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section Matthew Brost
@ 2023-07-13 15:24   ` Rodrigo Vivi
  2023-07-14  4:13     ` Matthew Brost
  0 siblings, 1 reply; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:24 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:53AM -0700, Matthew Brost wrote:
> GPUVA added support for mmap style VM binds; let's test it.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 314 insertions(+), 36 deletions(-)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 04d6c3956..434bdb332 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
>  	return NULL;
>  }
>  
> -#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
> -#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
> -#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> +#define MAP_FLAG_USERPTR		(0x1 << 0)
> +#define MAP_FLAG_INVALIDATE		(0x1 << 1)
> +#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
>  
>  
>  /**
> @@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
>  	bo_size = page_size * bo_n_pages;
>  
> -	if (flags & MUNMAP_FLAG_USERPTR) {
> +	if (flags & MAP_FLAG_USERPTR) {
>  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
>  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
>  			    MAP_ANONYMOUS, -1, 0);
> @@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	/* Do initial binds */
>  	bind_size = (page_size * bo_n_pages) / n_binds;
>  	for (i = 0; i < n_binds; ++i) {
> -		if (flags & MUNMAP_FLAG_USERPTR)
> +		if (flags & MAP_FLAG_USERPTR)
>  			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
>  						 bind_size, sync, 1);
>  		else
> @@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	 * cause a fault if a rebind occurs during munmap style VM unbind
>  	 * (partial VMAs unbound).
>  	 */
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		t.fd = fd;
>  		t.vm = vm;
>  #define PAGE_SIZE	4096
> @@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  		data = map + i * page_size;
>  		igt_assert_eq(data->data, 0xc0ffee);
>  	}
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		memset(map, 0, PAGE_SIZE / 2);
>  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
>  	} else {
> @@ -1487,7 +1487,7 @@ try_again_after_invalidate:
>  			igt_assert_eq(data->data, 0xc0ffee);
>  		}
>  	}
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		memset(map, 0, PAGE_SIZE / 2);
>  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
>  	} else {
> @@ -1498,7 +1498,7 @@ try_again_after_invalidate:
>  	 * The munmap style VM unbind can create new VMAs, make sure those are
>  	 * in the bookkeeping for another rebind after a userptr invalidate.
>  	 */
> -	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
> +	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
>  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
>  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
>  			    MAP_ANONYMOUS, -1, 0);
> @@ -1509,7 +1509,7 @@ try_again_after_invalidate:
>  	/* Confirm unbound region can be rebound */
>  	syncobj_reset(fd, &sync[0].handle, 1);
>  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> -	if (flags & MUNMAP_FLAG_USERPTR)
> +	if (flags & MAP_FLAG_USERPTR)
>  		xe_vm_bind_userptr_async(fd, vm, 0,
>  					 addr + unbind_n_page_offfset * page_size,
>  					 addr + unbind_n_page_offfset * page_size,
> @@ -1557,7 +1557,7 @@ try_again_after_invalidate:
>  		igt_assert_eq(data->data, 0xc0ffee);
>  	}
>  
> -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
>  		exit = 1;
>  		pthread_join(t.thread, NULL);
>  		pthread_barrier_destroy(&barrier);
> @@ -1572,6 +1572,251 @@ try_again_after_invalidate:
>  	xe_vm_destroy(fd, vm);
>  }
>  
> +/**
> + * SUBTEST: mmap-style-bind-%s
> + * Description: Test mmap style bind with %arg[1]
> + * Run type: FULL
> + * TODO: change ``'Run type' == FULL`` to a better category
> + *
> + * arg[1]:
> + *
> + * @all:				all
> + * @one-partial:			one partial
> + * @either-side-partial:		either side partial
> + * @either-side-full:			either side full
> + * @either-side-partial-hammer:		either side partial hammer
> + * @end:				end
> + * @front:				front
> + * @many-all:				many all
> + * @many-either-side-partial:		many either side partial
> + * @many-either-side-partial-hammer:	many either side partial hammer
> + * @userptr-all:			userptr all
> + * @userptr-one-partial:		userptr one partial
> + * @userptr-either-side-partial:	userptr either side partial
> + * @userptr-either-side-full:		userptr either side full
> + */
> +
> +static void
> +test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> +		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> +		     int unbind_n_pages, unsigned int flags)
> +{
> +	struct drm_xe_sync sync[2] = {
> +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> +	};
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 2,
> +		.syncs = to_user_pointer(sync),
> +	};
> +	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> +	uint32_t vm;
> +	uint32_t engine;
> +	size_t bo_size;
> +	uint32_t bo0 = 0, bo1 = 0;
> +	uint64_t bind_size;
> +	uint64_t page_size = xe_get_default_alignment(fd);

Do we have plans to add coverage for the other supported cases with different
offsets, alignments, etc.?

anyway this test looks correct to me

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

> +	struct {
> +		uint32_t batch[16];
> +		uint64_t pad;
> +		uint32_t data;
> +	} *data;
> +	void *map0, *map1;
> +	int i, b;
> +	struct thread_data t;
> +	pthread_barrier_t barrier;
> +	int exit = 0;
> +
> +	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> +	bo_size = page_size * bo_n_pages;
> +
> +	if (flags & MAP_FLAG_USERPTR) {
> +		map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> +			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> +			    MAP_ANONYMOUS, -1, 0);
> +		map1 = mmap(from_user_pointer(addr + bo_size),
> +			    bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
> +			    MAP_FIXED | MAP_ANONYMOUS, -1, 0);
> +		igt_assert(map0 != MAP_FAILED);
> +		igt_assert(map1 != MAP_FAILED);
> +	} else {
> +		bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
> +		map0 = xe_bo_map(fd, bo0, bo_size);
> +		bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
> +		map1 = xe_bo_map(fd, bo1, bo_size);
> +	}
> +	memset(map0, 0, bo_size);
> +	memset(map1, 0, bo_size);
> +
> +	engine = xe_engine_create(fd, vm, eci, 0);
> +
> +	sync[0].handle = syncobj_create(fd, 0);
> +	sync[1].handle = syncobj_create(fd, 0);
> +
> +	/* Do initial binds */
> +	bind_size = (page_size * bo_n_pages) / n_binds;
> +	for (i = 0; i < n_binds; ++i) {
> +		if (flags & MAP_FLAG_USERPTR)
> +			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
> +						 bind_size, sync, 1);
> +		else
> +			xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
> +					 addr, bind_size, sync, 1);
> +		addr += bind_size;
> +	}
> +	addr = base_addr;
> +
> +	/*
> +	 * Kick a thread to write the first page continuously to ensure we can't
> +	 * cause a fault if a rebind occurs during munmap style VM unbind
> +	 * (partial VMAs unbound).
> +	 */
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> +		t.fd = fd;
> +		t.vm = vm;
> +#define PAGE_SIZE	4096
> +		t.addr = addr + PAGE_SIZE / 2;
> +		t.eci = eci;
> +		t.exit = &exit;
> +		t.map = map0 + PAGE_SIZE / 2;
> +		t.barrier = &barrier;
> +		pthread_barrier_init(&barrier, NULL, 2);
> +		pthread_create(&t.thread, 0, hammer_thread, &t);
> +		pthread_barrier_wait(&barrier);
> +	}
> +
> +	/* Verify we can use every page */
> +	for (i = 0; i < n_binds; ++i) {
> +		uint64_t batch_offset = (char *)&data->batch - (char *)data;
> +		uint64_t batch_addr = addr + batch_offset;
> +		uint64_t sdi_offset = (char *)&data->data - (char *)data;
> +		uint64_t sdi_addr = addr + sdi_offset;
> +		data = map0 + i * page_size;
> +
> +		b = 0;
> +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +		data->batch[b++] = sdi_addr;
> +		data->batch[b++] = sdi_addr >> 32;
> +		data->batch[b++] = 0xc0ffee;
> +		data->batch[b++] = MI_BATCH_BUFFER_END;
> +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> +
> +		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> +		if (i)
> +			syncobj_reset(fd, &sync[1].handle, 1);
> +		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> +
> +		exec.engine_id = engine;
> +		exec.address = batch_addr;
> +		xe_exec(fd, &exec);
> +
> +		addr += page_size;
> +	}
> +	addr = base_addr;
> +
> +	/* Bind some of the pages to different BO / userptr */
> +	syncobj_reset(fd, &sync[0].handle, 1);
> +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> +	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> +	if (flags & MAP_FLAG_USERPTR)
> +		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
> +					 unbind_n_page_offfset * page_size,
> +					 addr + unbind_n_page_offfset * page_size,
> +					 unbind_n_pages * page_size, sync, 2);
> +	else
> +		xe_vm_bind_async(fd, vm, 0, bo1,
> +				 unbind_n_page_offfset * page_size,
> +				 addr + unbind_n_page_offfset * page_size,
> +				 unbind_n_pages * page_size, sync, 2);
> +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> +	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> +
> +	/* Verify all pages written */
> +	for (i = 0; i < n_binds; ++i) {
> +		data = map0 + i * page_size;
> +		igt_assert_eq(data->data, 0xc0ffee);
> +	}
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> +		memset(map0, 0, PAGE_SIZE / 2);
> +		memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> +	} else {
> +		memset(map0, 0, bo_size);
> +		memset(map1, 0, bo_size);
> +	}
> +
> +	/* Verify we can use every page */
> +	for (i = 0; i < n_binds; ++i) {
> +		uint64_t batch_offset = (char *)&data->batch - (char *)data;
> +		uint64_t batch_addr = addr + batch_offset;
> +		uint64_t sdi_offset = (char *)&data->data - (char *)data;
> +		uint64_t sdi_addr = addr + sdi_offset;
> +
> +		data = map0 + i * page_size;
> +		b = 0;
> +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +		data->batch[b++] = sdi_addr;
> +		data->batch[b++] = sdi_addr >> 32;
> +		data->batch[b++] = 0xc0ffee;
> +		data->batch[b++] = MI_BATCH_BUFFER_END;
> +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> +
> +		data = map1 + i * page_size;
> +		b = 0;
> +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +		data->batch[b++] = sdi_addr;
> +		data->batch[b++] = sdi_addr >> 32;
> +		data->batch[b++] = 0xc0ffee;
> +		data->batch[b++] = MI_BATCH_BUFFER_END;
> +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> +
> +		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> +		if (i)
> +			syncobj_reset(fd, &sync[1].handle, 1);
> +		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> +
> +		exec.engine_id = engine;
> +		exec.address = batch_addr;
> +		xe_exec(fd, &exec);
> +
> +		addr += page_size;
> +	}
> +	addr = base_addr;
> +
> +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> +	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> +
> +	/* Verify all pages written */
> +	for (i = 0; i < n_binds; ++i) {
> +		uint32_t result = 0;
> +
> +		data = map0 + i * page_size;
> +		result |= data->data;
> +
> +		data = map1 + i * page_size;
> +		result |= data->data;
> +
> +		igt_assert_eq(result, 0xc0ffee);
> +	}
> +
> +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> +		exit = 1;
> +		pthread_join(t.thread, NULL);
> +		pthread_barrier_destroy(&barrier);
> +	}
> +
> +	syncobj_destroy(fd, sync[0].handle);
> +	syncobj_destroy(fd, sync[1].handle);
> +	xe_engine_destroy(fd, engine);
> +	munmap(map0, bo_size);
> +	munmap(map1, bo_size);
> +	if (bo0)
> +		gem_close(fd, bo0);
> +	if (bo1)
> +		gem_close(fd, bo1);
> +	xe_vm_destroy(fd, vm);
> +}
> +
>  igt_main
>  {
>  	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
> @@ -1584,55 +1829,74 @@ igt_main
>  		int unbind_n_page_offfset;
>  		int unbind_n_pages;
>  		unsigned int flags;
> -	} sections[] = {
> +	} munmap_sections[] = {
>  		{ "all", 4, 2, 0, 4, 0 },
>  		{ "one-partial", 4, 1, 1, 2, 0 },
>  		{ "either-side-partial", 4, 2, 1, 2, 0 },
>  		{ "either-side-partial-hammer", 4, 2, 1, 2,
> -			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
> +			MAP_FLAG_HAMMER_FIRST_PAGE },
>  		{ "either-side-full", 4, 4, 1, 2, 0 },
>  		{ "end", 4, 2, 0, 3, 0 },
>  		{ "front", 4, 2, 1, 3, 0 },
>  		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
>  		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
>  		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
> -			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
> +			MAP_FLAG_HAMMER_FIRST_PAGE },
>  		{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
>  		{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
>  		{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
> -		{ "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
> -		{ "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
> +		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
> +		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
>  		{ "userptr-either-side-partial", 4, 2, 1, 2,
> -			MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
>  		{ "userptr-either-side-full", 4, 4, 1, 2,
> -			MUNMAP_FLAG_USERPTR },
> -		{ "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
> -		{ "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
> +		{ "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
> +		{ "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
>  		{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
> -			MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
>  		{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
> -			MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
>  		{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
> -			MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
>  		{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
> -			MUNMAP_FLAG_USERPTR },
> +			MAP_FLAG_USERPTR },
>  		{ "userptr-inval-either-side-full", 4, 4, 1, 2,
> -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> -		{ "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
> -			MUNMAP_FLAG_INVALIDATE },
> -		{ "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
> -			MUNMAP_FLAG_INVALIDATE },
> +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> +		{ "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
> +			MAP_FLAG_INVALIDATE },
> +		{ "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
> +			MAP_FLAG_INVALIDATE },
>  		{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
> -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
>  		{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
> -			4 * 8 - 2, MUNMAP_FLAG_USERPTR |
> -				MUNMAP_FLAG_INVALIDATE },
> +			4 * 8 - 2, MAP_FLAG_USERPTR |
> +				MAP_FLAG_INVALIDATE },
>  		{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
> -			2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> +			2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
>  		{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
> -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
>  		{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
> -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> +		{ NULL },
> +	};
> +	const struct section mmap_sections[] = {
> +		{ "all", 4, 2, 0, 4, 0 },
> +		{ "one-partial", 4, 1, 1, 2, 0 },
> +		{ "either-side-partial", 4, 2, 1, 2, 0 },
> +		{ "either-side-full", 4, 4, 1, 2, 0 },
> +		{ "either-side-partial-hammer", 4, 2, 1, 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE },
> +		{ "end", 4, 2, 0, 3, 0 },
> +		{ "front", 4, 2, 1, 3, 0 },
> +		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
> +		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
> +		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE },
> +		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
> +		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
> +		{ "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
> +		{ "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
>  		{ NULL },
>  	};
>  
> @@ -1839,7 +2103,7 @@ igt_main
>  			break;
>  		}
>  
> -	for (const struct section *s = sections; s->name; s++) {
> +	for (const struct section *s = munmap_sections; s->name; s++) {
>  		igt_subtest_f("munmap-style-unbind-%s", s->name) {
>  			igt_require_f(hwe_non_copy,
>  				      "Requires non-copy engine to run\n");
> @@ -1853,6 +2117,20 @@ igt_main
>  		}
>  	}
>  
> +	for (const struct section *s = mmap_sections; s->name; s++) {
> +		igt_subtest_f("mmap-style-bind-%s", s->name) {
> +			igt_require_f(hwe_non_copy,
> +				      "Requires non-copy engine to run\n");
> +
> +			test_mmap_style_bind(fd, hwe_non_copy,
> +					     s->bo_n_pages,
> +					     s->n_binds,
> +					     s->unbind_n_page_offfset,
> +					     s->unbind_n_pages,
> +					     s->flags);
> +		}
> +	}
> +
>  	igt_fixture
>  		drm_close_driver(fd);
>  }
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages
  2023-07-10 14:58 ` [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages Matthew Brost
@ 2023-07-13 15:30   ` Rodrigo Vivi
  2023-07-14  4:11     ` Matthew Brost
  0 siblings, 1 reply; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:30 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:54AM -0700, Matthew Brost wrote:
> Splitting large pages involves using dma-resv slots for ordering, make
> sure this works.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 81 +++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 60 insertions(+), 21 deletions(-)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 434bdb332..5fd511f0b 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -1252,7 +1252,8 @@ static void *hammer_thread(void *tdata)
>  #define MAP_FLAG_USERPTR		(0x1 << 0)
>  #define MAP_FLAG_INVALIDATE		(0x1 << 1)
>  #define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> -
> +#define MAP_FLAG_LARGE_PAGE		(0x1 << 3)
> +#define MAP_FLAG_LARGE_PAGE_NO_SPLIT	(0x1 << 4)
>  
>  /**
>   * SUBTEST: munmap-style-unbind-%s
> @@ -1305,12 +1306,16 @@ static void *hammer_thread(void *tdata)
>   *					userptr inval many either side full
>   * @userptr-inval-many-end:		userptr inval many end
>   * @userptr-inval-many-front:		userptr inval many front
> + * @either-side-partial-large-page-hammer:
> + *					either side partial large page hammer
> + * @either-side-partial-split-page-hammer:
> + *					either side partial split page hammer
>   */
>  
>  static void
>  test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  			 int bo_n_pages, int n_binds,
> -			 int unbind_n_page_offfset, int unbind_n_pages,
> +			 int unbind_n_page_offset, int unbind_n_pages,
>  			 unsigned int flags)
>  {
>  	struct drm_xe_sync sync[2] = {
> @@ -1322,7 +1327,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  		.num_syncs = 2,
>  		.syncs = to_user_pointer(sync),
>  	};
> -	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> +	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
>  	uint32_t vm;
>  	uint32_t engine;
>  	size_t bo_size;
> @@ -1340,6 +1345,14 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	struct thread_data t;
>  	pthread_barrier_t barrier;
>  	int exit = 0;
> +	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
> +
> +	if (flags & MAP_FLAG_LARGE_PAGE) {
> +		bo_n_pages *= n_page_per_2mb;
> +		unbind_n_pages *= n_page_per_2mb;
> +		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
> +			unbind_n_page_offset *= n_page_per_2mb;
> +	}
>  
>  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
>  	bo_size = page_size * bo_n_pages;
> @@ -1426,7 +1439,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
>  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
>  	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
>  	xe_vm_unbind_async(fd, vm, 0, 0,
> -			   addr + unbind_n_page_offfset * page_size,
> +			   addr + unbind_n_page_offset * page_size,
>  			   unbind_n_pages * page_size, sync, 2);
>  
>  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> @@ -1455,8 +1468,8 @@ try_again_after_invalidate:
>  		data = map + i * page_size;
>  		addr += page_size;
>  
> -		if (i < unbind_n_page_offfset ||
> -		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
> +		if (i < unbind_n_page_offset ||
> +		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
>  			b = 0;
>  			data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
>  			data->batch[b++] = sdi_addr;
> @@ -1481,8 +1494,8 @@ try_again_after_invalidate:
>  
>  	/* Verify all pages still bound written */
>  	for (i = 0; i < n_binds; ++i) {
> -		if (i < unbind_n_page_offfset ||
> -		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
> +		if (i < unbind_n_page_offset ||
> +		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
>  			data = map + i * page_size;
>  			igt_assert_eq(data->data, 0xc0ffee);
>  		}
> @@ -1511,13 +1524,13 @@ try_again_after_invalidate:
>  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
>  	if (flags & MAP_FLAG_USERPTR)
>  		xe_vm_bind_userptr_async(fd, vm, 0,
> -					 addr + unbind_n_page_offfset * page_size,
> -					 addr + unbind_n_page_offfset * page_size,
> +					 addr + unbind_n_page_offset * page_size,
> +					 addr + unbind_n_page_offset * page_size,
>  					 unbind_n_pages * page_size, sync, 1);
>  	else
>  		xe_vm_bind_async(fd, vm, 0, bo,
> -				 unbind_n_page_offfset * page_size,
> -				 addr + unbind_n_page_offfset * page_size,
> +				 unbind_n_page_offset * page_size,
> +				 addr + unbind_n_page_offset * page_size,
>  				 unbind_n_pages * page_size, sync, 1);
>  
>  	/* Verify we can use every page */
> @@ -1594,11 +1607,15 @@ try_again_after_invalidate:
>   * @userptr-one-partial:		userptr one partial
>   * @userptr-either-side-partial:	userptr either side partial
>   * @userptr-either-side-full:		userptr either side full
> + * @either-side-partial-large-page-hammer:
> + *					either side partial large page hammer
> + * @either-side-partial-split-page-hammer:
> + *					either side partial split page hammer
>   */
>  
>  static void
>  test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> -		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> +		     int bo_n_pages, int n_binds, int unbind_n_page_offset,
>  		     int unbind_n_pages, unsigned int flags)
>  {
>  	struct drm_xe_sync sync[2] = {
> @@ -1610,7 +1627,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
>  		.num_syncs = 2,
>  		.syncs = to_user_pointer(sync),
>  	};
> -	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> +	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;

why do we need to change the offset here?

>  	uint32_t vm;
>  	uint32_t engine;
>  	size_t bo_size;
> @@ -1627,6 +1644,14 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
>  	struct thread_data t;
>  	pthread_barrier_t barrier;
>  	int exit = 0;
> +	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
> +
> +	if (flags & MAP_FLAG_LARGE_PAGE) {
> +		bo_n_pages *= n_page_per_2mb;
> +		unbind_n_pages *= n_page_per_2mb;
> +		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
> +			unbind_n_page_offset *= n_page_per_2mb;
> +	}
>  
>  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
>  	bo_size = page_size * bo_n_pages;
> @@ -1721,13 +1746,13 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
>  	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
>  	if (flags & MAP_FLAG_USERPTR)
>  		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
> -					 unbind_n_page_offfset * page_size,
> -					 addr + unbind_n_page_offfset * page_size,
> +					 unbind_n_page_offset * page_size,
> +					 addr + unbind_n_page_offset * page_size,
>  					 unbind_n_pages * page_size, sync, 2);
>  	else
>  		xe_vm_bind_async(fd, vm, 0, bo1,
> -				 unbind_n_page_offfset * page_size,
> -				 addr + unbind_n_page_offfset * page_size,
> +				 unbind_n_page_offset * page_size,
> +				 addr + unbind_n_page_offset * page_size,

The typo fix could probably be a separate patch to avoid noise here...
because now I'm wondering whether the addr change is actually needed for
the large pages or is just something extra this patch covered, like the typo.

But anyway, it is already here, no need to change...

Everything looks okay on the test itself... and I will eventually get the
addr choices...

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

>  				 unbind_n_pages * page_size, sync, 2);
>  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
>  	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> @@ -1826,7 +1851,7 @@ igt_main
>  		const char *name;
>  		int bo_n_pages;
>  		int n_binds;
> -		int unbind_n_page_offfset;
> +		int unbind_n_page_offset;
>  		int unbind_n_pages;
>  		unsigned int flags;
>  	} munmap_sections[] = {
> @@ -1835,6 +1860,13 @@ igt_main
>  		{ "either-side-partial", 4, 2, 1, 2, 0 },
>  		{ "either-side-partial-hammer", 4, 2, 1, 2,
>  			MAP_FLAG_HAMMER_FIRST_PAGE },
> +		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE |
> +			MAP_FLAG_LARGE_PAGE },
> +		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE |
> +			MAP_FLAG_LARGE_PAGE |
> +			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
>  		{ "either-side-full", 4, 4, 1, 2, 0 },
>  		{ "end", 4, 2, 0, 3, 0 },
>  		{ "front", 4, 2, 1, 3, 0 },
> @@ -1887,6 +1919,13 @@ igt_main
>  		{ "either-side-full", 4, 4, 1, 2, 0 },
>  		{ "either-side-partial-hammer", 4, 2, 1, 2,
>  			MAP_FLAG_HAMMER_FIRST_PAGE },
> +		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE |
> +			MAP_FLAG_LARGE_PAGE },
> +		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
> +			MAP_FLAG_HAMMER_FIRST_PAGE |
> +			MAP_FLAG_LARGE_PAGE |
> +			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
>  		{ "end", 4, 2, 0, 3, 0 },
>  		{ "front", 4, 2, 1, 3, 0 },
>  		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
> @@ -2111,7 +2150,7 @@ igt_main
>  			test_munmap_style_unbind(fd, hwe_non_copy,
>  						 s->bo_n_pages,
>  						 s->n_binds,
> -						 s->unbind_n_page_offfset,
> +						 s->unbind_n_page_offset,
>  						 s->unbind_n_pages,
>  						 s->flags);
>  		}
> @@ -2125,7 +2164,7 @@ igt_main
>  			test_mmap_style_bind(fd, hwe_non_copy,
>  					     s->bo_n_pages,
>  					     s->n_binds,
> -					     s->unbind_n_page_offfset,
> +					     s->unbind_n_page_offset,
>  					     s->unbind_n_pages,
>  					     s->flags);
>  		}
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-10 14:58 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
@ 2023-07-13 15:32   ` Rodrigo Vivi
  2023-07-14  4:08     ` Matthew Brost
  0 siblings, 1 reply; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:32 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:55AM -0700, Matthew Brost wrote:
> Exercises the maple tree dep tracker logic.

Could you please add a bit more explanation here of how this
achieves the objective?

> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 6 ++++++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 5fd511f0b..36cd80357 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  				NULL));
>  	igt_assert_eq(data[0].data, 0xc0ffee);
>  
> +	syncobj_destroy(fd, sync[0].handle);
> +	sync[0].handle = syncobj_create(fd, 0);
> +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> +	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
> +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));

Should we have an extra flag for this? No risk to the existing tests?

> +
>  	syncobj_destroy(fd, sync[0].handle);
>  	for (i = 0; i < N_ENGINES; i++) {
>  		syncobj_destroy(fd, syncobjs[i]);
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section
  2023-07-10 14:58 ` [igt-dev] [PATCH 5/5] xe_vm: add bind engine conflict section Matthew Brost
@ 2023-07-13 15:34   ` Rodrigo Vivi
  0 siblings, 0 replies; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-13 15:34 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Mon, Jul 10, 2023 at 07:58:56AM -0700, Matthew Brost wrote:
> Verify bind engines can't race and corrupt page tables.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

> ---
>  tests/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 36 insertions(+), 7 deletions(-)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 36cd80357..315cb7009 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -689,10 +689,18 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
>   * Description: Test independent bind engines
>   * Functionality: bind engines
>   * Run type: BAT
> + *
> + * SUBTEST: bind-engines-conflict
> + * Description: Test conflict bind engines
> + * Functionality: bind engines
> + * Run type: BAT
>   */
>  
> +#define CONFLICT	(0x1 << 0)
> +
>  static void
> -test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
> +test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> +			      unsigned int flags)
>  {
>  	uint32_t vm;
>  	uint64_t addr = 0x1a0000;
> @@ -759,7 +767,7 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  			xe_spin_wait_started(&data[i].spin);
>  
>  			/* Do bind to 1st engine blocked on cork */
> -			addr += bo_size;
> +			addr += (flags & CONFLICT) ? (0x1 << 21) : bo_size;
>  			sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
>  			sync[1].handle = syncobjs[e];
>  			xe_vm_bind_async(fd, vm, bind_engines[e], bo, 0, addr,
> @@ -795,10 +803,20 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  		xe_exec(fd, &exec);
>  	}
>  
> -	/* Verify initial bind, bind + write to 2nd engine done */
> -	igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0, NULL));
> -	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> -	igt_assert_eq(data[1].data, 0xc0ffee);
> +	if (!(flags & CONFLICT)) {
> +		/* Verify initial bind, bind + write to 2nd engine done */
> +		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
> +					NULL));
> +		igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0,
> +					NULL));
> +		igt_assert_eq(data[1].data, 0xc0ffee);
> +	} else {
> +		/* Let jobs run for a bit */
> +		usleep(100000);
> +		/* bind + write to 2nd engine waiting */
> +		igt_assert(!syncobj_wait(fd, &syncobjs[1], 1, 1, 0, NULL));
> +		igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> +	}
>  
>  	/* Verify bind + write to 1st engine still inflight */
>  	igt_assert(!syncobj_wait(fd, &syncobjs[0], 1, 1, 0, NULL));
> @@ -811,6 +829,13 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  				NULL));
>  	igt_assert_eq(data[0].data, 0xc0ffee);
>  
> +	if (flags & CONFLICT) {
> +		/* Verify bind + write to 2nd engine done */
> +		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
> +					NULL));
> +		igt_assert_eq(data[1].data, 0xc0ffee);
> +	}
> +
>  	syncobj_destroy(fd, sync[0].handle);
>  	sync[0].handle = syncobj_create(fd, 0);
>  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> @@ -2001,7 +2026,11 @@ igt_main
>  
>  	igt_subtest("bind-engines-independent")
>  		xe_for_each_hw_engine(fd, hwe)
> -			test_bind_engines_independent(fd, hwe);
> +			test_bind_engines_independent(fd, hwe, 0);
> +
> +	igt_subtest("bind-engines-conflict")
> +		xe_for_each_hw_engine(fd, hwe)
> +			test_bind_engines_independent(fd, hwe, CONFLICT);
>  
>  	igt_subtest("bind-array-twice")
>  		xe_for_each_hw_engine(fd, hwe)
> -- 
> 2.34.1
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-13 15:32   ` Rodrigo Vivi
@ 2023-07-14  4:08     ` Matthew Brost
  0 siblings, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2023-07-14  4:08 UTC (permalink / raw)
  To: Rodrigo Vivi; +Cc: igt-dev

On Thu, Jul 13, 2023 at 11:32:53AM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 10, 2023 at 07:58:55AM -0700, Matthew Brost wrote:
> > Exercises the maple tree dep tracker logic.
> 
> Could you please add a bit more explanation here of how this
> achieves the objective?
>

This is a stale comment. I'll update.
 
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  tests/xe/xe_vm.c | 6 ++++++
> >  1 file changed, 6 insertions(+)
> > 
> > diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> > index 5fd511f0b..36cd80357 100644
> > --- a/tests/xe/xe_vm.c
> > +++ b/tests/xe/xe_vm.c
> > @@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
> >  				NULL));
> >  	igt_assert_eq(data[0].data, 0xc0ffee);
> >  
> > +	syncobj_destroy(fd, sync[0].handle);
> > +	sync[0].handle = syncobj_create(fd, 0);
> > +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > +	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
> > +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> 
> Should we have an extra flag for this? No risk to the existing tests?
>

No flag needed, this just cleans up all VMA mappings.
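
For comparison, without the helper the cleanup would have to walk the
mappings itself -- roughly (sketch only, the addr/stride here is
illustrative rather than the test's exact layout):

	/* unbind each per-engine range individually */
	for (i = 0; i < N_ENGINES; i++)
		xe_vm_unbind_async(fd, vm, 0, 0, addr + i * bo_size,
				   bo_size, sync, 1);

xe_vm_unbind_all_async() collapses that into a single unbind-all
operation keyed on the BO handle.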

Matt
 
> > +
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	for (i = 0; i < N_ENGINES; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -- 
> > 2.34.1
> > 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 3/5] xe_vm: Add mmap / munmap sections that split large pages
  2023-07-13 15:30   ` Rodrigo Vivi
@ 2023-07-14  4:11     ` Matthew Brost
  0 siblings, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2023-07-14  4:11 UTC (permalink / raw)
  To: Rodrigo Vivi; +Cc: igt-dev

On Thu, Jul 13, 2023 at 11:30:19AM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 10, 2023 at 07:58:54AM -0700, Matthew Brost wrote:
> > Splitting large pages involves using dma-resv slots for ordering, make
> > sure this works.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  tests/xe/xe_vm.c | 81 +++++++++++++++++++++++++++++++++++-------------
> >  1 file changed, 60 insertions(+), 21 deletions(-)
> > 
> > diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> > index 434bdb332..5fd511f0b 100644
> > --- a/tests/xe/xe_vm.c
> > +++ b/tests/xe/xe_vm.c
> > @@ -1252,7 +1252,8 @@ static void *hammer_thread(void *tdata)
> >  #define MAP_FLAG_USERPTR		(0x1 << 0)
> >  #define MAP_FLAG_INVALIDATE		(0x1 << 1)
> >  #define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> > -
> > +#define MAP_FLAG_LARGE_PAGE		(0x1 << 3)
> > +#define MAP_FLAG_LARGE_PAGE_NO_SPLIT	(0x1 << 4)
> >  
> >  /**
> >   * SUBTEST: munmap-style-unbind-%s
> > @@ -1305,12 +1306,16 @@ static void *hammer_thread(void *tdata)
> >   *					userptr inval many either side full
> >   * @userptr-inval-many-end:		userptr inval many end
> >   * @userptr-inval-many-front:		userptr inval many front
> > + * @either-side-partial-large-page-hammer:
> > + *					either side partial large page hammer
> > + * @either-side-partial-split-page-hammer:
> > + *					either side partial split page hammer
> >   */
> >  
> >  static void
> >  test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  			 int bo_n_pages, int n_binds,
> > -			 int unbind_n_page_offfset, int unbind_n_pages,
> > +			 int unbind_n_page_offset, int unbind_n_pages,
> >  			 unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> > @@ -1322,7 +1327,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> > +	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
> >  	uint32_t vm;
> >  	uint32_t engine;
> >  	size_t bo_size;
> > @@ -1340,6 +1345,14 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	struct thread_data t;
> >  	pthread_barrier_t barrier;
> >  	int exit = 0;
> > +	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
> > +
> > +	if (flags & MAP_FLAG_LARGE_PAGE) {
> > +		bo_n_pages *= n_page_per_2mb;
> > +		unbind_n_pages *= n_page_per_2mb;
> > +		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
> > +			unbind_n_page_offset *= n_page_per_2mb;
> > +	}
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  	bo_size = page_size * bo_n_pages;
> > @@ -1426,7 +1439,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> >  	xe_vm_unbind_async(fd, vm, 0, 0,
> > -			   addr + unbind_n_page_offfset * page_size,
> > +			   addr + unbind_n_page_offset * page_size,
> >  			   unbind_n_pages * page_size, sync, 2);
> >  
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -1455,8 +1468,8 @@ try_again_after_invalidate:
> >  		data = map + i * page_size;
> >  		addr += page_size;
> >  
> > -		if (i < unbind_n_page_offfset ||
> > -		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
> > +		if (i < unbind_n_page_offset ||
> > +		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
> >  			b = 0;
> >  			data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> >  			data->batch[b++] = sdi_addr;
> > @@ -1481,8 +1494,8 @@ try_again_after_invalidate:
> >  
> >  	/* Verify all pages still bound written */
> >  	for (i = 0; i < n_binds; ++i) {
> > -		if (i < unbind_n_page_offfset ||
> > -		    i + 1 > unbind_n_page_offfset + unbind_n_pages) {
> > +		if (i < unbind_n_page_offset ||
> > +		    i + 1 > unbind_n_page_offset + unbind_n_pages) {
> >  			data = map + i * page_size;
> >  			igt_assert_eq(data->data, 0xc0ffee);
> >  		}
> > @@ -1511,13 +1524,13 @@ try_again_after_invalidate:
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  	if (flags & MAP_FLAG_USERPTR)
> >  		xe_vm_bind_userptr_async(fd, vm, 0,
> > -					 addr + unbind_n_page_offfset * page_size,
> > -					 addr + unbind_n_page_offfset * page_size,
> > +					 addr + unbind_n_page_offset * page_size,
> > +					 addr + unbind_n_page_offset * page_size,
> >  					 unbind_n_pages * page_size, sync, 1);
> >  	else
> >  		xe_vm_bind_async(fd, vm, 0, bo,
> > -				 unbind_n_page_offfset * page_size,
> > -				 addr + unbind_n_page_offfset * page_size,
> > +				 unbind_n_page_offset * page_size,
> > +				 addr + unbind_n_page_offset * page_size,
> >  				 unbind_n_pages * page_size, sync, 1);
> >  
> >  	/* Verify we can use every page */
> > @@ -1594,11 +1607,15 @@ try_again_after_invalidate:
> >   * @userptr-one-partial:		userptr one partial
> >   * @userptr-either-side-partial:	userptr either side partial
> >   * @userptr-either-side-full:		userptr either side full
> > + * @either-side-partial-large-page-hammer:
> > + *					either side partial large page hammer
> > + * @either-side-partial-split-page-hammer:
> > + *					either side partial split page hammer
> >   */
> >  
> >  static void
> >  test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> > -		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> > +		     int bo_n_pages, int n_binds, int unbind_n_page_offset,
> >  		     int unbind_n_pages, unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> > @@ -1610,7 +1627,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> > +	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
> 
> why do we need to change the offset here?
> 

We need at least 2MB (large page) alignment.
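
i.e., just spelling out the arithmetic:

	/* 0x1a0000  % 0x200000 == 0x1a0000	-> not 2MB aligned */
	/* 0x1a00000 % 0x200000 == 0		-> 13 * 2MB, aligned */

and with a 4KiB default alignment, n_page_per_2mb = 0x200000 / 0x1000 = 512,
so the scaled-up ranges are big enough to actually contain 2MB entries.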

> >  	uint32_t vm;
> >  	uint32_t engine;
> >  	size_t bo_size;
> > @@ -1627,6 +1644,14 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	struct thread_data t;
> >  	pthread_barrier_t barrier;
> >  	int exit = 0;
> > +	int n_page_per_2mb = 0x200000 / xe_get_default_alignment(fd);
> > +
> > +	if (flags & MAP_FLAG_LARGE_PAGE) {
> > +		bo_n_pages *= n_page_per_2mb;
> > +		unbind_n_pages *= n_page_per_2mb;
> > +		if (flags & MAP_FLAG_LARGE_PAGE_NO_SPLIT)
> > +			unbind_n_page_offset *= n_page_per_2mb;
> > +	}
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  	bo_size = page_size * bo_n_pages;
> > @@ -1721,13 +1746,13 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> >  	if (flags & MAP_FLAG_USERPTR)
> >  		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
> > -					 unbind_n_page_offfset * page_size,
> > -					 addr + unbind_n_page_offfset * page_size,
> > +					 unbind_n_page_offset * page_size,
> > +					 addr + unbind_n_page_offset * page_size,
> >  					 unbind_n_pages * page_size, sync, 2);
> >  	else
> >  		xe_vm_bind_async(fd, vm, 0, bo1,
> > -				 unbind_n_page_offfset * page_size,
> > -				 addr + unbind_n_page_offfset * page_size,
> > +				 unbind_n_page_offset * page_size,
> > +				 addr + unbind_n_page_offset * page_size,
> 
> probably the typo fix could be a separated patch to avoid noise here...

Yeah, will split the s/offfset/offset/ change into a separate patch.

> because now I'm wondering if the addr change is a change that is needed for
> the large page or just something extra this patch covered like the typo here.
> 
> But anyway, it is already here, no need to change...
> 
> Everything looks okay on the test itself... and I will eventually get the
> addr choices...
> 
> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
>

Thanks.

Matt
 
> >  				 unbind_n_pages * page_size, sync, 2);
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> > @@ -1826,7 +1851,7 @@ igt_main
> >  		const char *name;
> >  		int bo_n_pages;
> >  		int n_binds;
> > -		int unbind_n_page_offfset;
> > +		int unbind_n_page_offset;
> >  		int unbind_n_pages;
> >  		unsigned int flags;
> >  	} munmap_sections[] = {
> > @@ -1835,6 +1860,13 @@ igt_main
> >  		{ "either-side-partial", 4, 2, 1, 2, 0 },
> >  		{ "either-side-partial-hammer", 4, 2, 1, 2,
> >  			MAP_FLAG_HAMMER_FIRST_PAGE },
> > +		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE |
> > +			MAP_FLAG_LARGE_PAGE },
> > +		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE |
> > +			MAP_FLAG_LARGE_PAGE |
> > +			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
> >  		{ "either-side-full", 4, 4, 1, 2, 0 },
> >  		{ "end", 4, 2, 0, 3, 0 },
> >  		{ "front", 4, 2, 1, 3, 0 },
> > @@ -1887,6 +1919,13 @@ igt_main
> >  		{ "either-side-full", 4, 4, 1, 2, 0 },
> >  		{ "either-side-partial-hammer", 4, 2, 1, 2,
> >  			MAP_FLAG_HAMMER_FIRST_PAGE },
> > +		{ "either-side-partial-split-page-hammer", 4, 2, 1, 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE |
> > +			MAP_FLAG_LARGE_PAGE },
> > +		{ "either-side-partial-large-page-hammer", 4, 2, 1, 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE |
> > +			MAP_FLAG_LARGE_PAGE |
> > +			MAP_FLAG_LARGE_PAGE_NO_SPLIT },
> >  		{ "end", 4, 2, 0, 3, 0 },
> >  		{ "front", 4, 2, 1, 3, 0 },
> >  		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
> > @@ -2111,7 +2150,7 @@ igt_main
> >  			test_munmap_style_unbind(fd, hwe_non_copy,
> >  						 s->bo_n_pages,
> >  						 s->n_binds,
> > -						 s->unbind_n_page_offfset,
> > +						 s->unbind_n_page_offset,
> >  						 s->unbind_n_pages,
> >  						 s->flags);
> >  		}
> > @@ -2125,7 +2164,7 @@ igt_main
> >  			test_mmap_style_bind(fd, hwe_non_copy,
> >  					     s->bo_n_pages,
> >  					     s->n_binds,
> > -					     s->unbind_n_page_offfset,
> > +					     s->unbind_n_page_offset,
> >  					     s->unbind_n_pages,
> >  					     s->flags);
> >  		}
> > -- 
> > 2.34.1
> > 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [igt-dev] [PATCH 2/5] xe_vm: MMAP style VM binds section
  2023-07-13 15:24   ` Rodrigo Vivi
@ 2023-07-14  4:13     ` Matthew Brost
  0 siblings, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2023-07-14  4:13 UTC (permalink / raw)
  To: Rodrigo Vivi; +Cc: igt-dev

On Thu, Jul 13, 2023 at 11:24:47AM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 10, 2023 at 07:58:53AM -0700, Matthew Brost wrote:
> > GPUVA added support for this, so let's test it.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  tests/xe/xe_vm.c | 350 ++++++++++++++++++++++++++++++++++++++++++-----
> >  1 file changed, 314 insertions(+), 36 deletions(-)
> > 
> > diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> > index 04d6c3956..434bdb332 100644
> > --- a/tests/xe/xe_vm.c
> > +++ b/tests/xe/xe_vm.c
> > @@ -1249,9 +1249,9 @@ static void *hammer_thread(void *tdata)
> >  	return NULL;
> >  }
> >  
> > -#define MUNMAP_FLAG_USERPTR		(0x1 << 0)
> > -#define MUNMAP_FLAG_INVALIDATE		(0x1 << 1)
> > -#define MUNMAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> > +#define MAP_FLAG_USERPTR		(0x1 << 0)
> > +#define MAP_FLAG_INVALIDATE		(0x1 << 1)
> > +#define MAP_FLAG_HAMMER_FIRST_PAGE	(0x1 << 2)
> >  
> >  
> >  /**
> > @@ -1344,7 +1344,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  	bo_size = page_size * bo_n_pages;
> >  
> > -	if (flags & MUNMAP_FLAG_USERPTR) {
> > +	if (flags & MAP_FLAG_USERPTR) {
> >  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> >  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> >  			    MAP_ANONYMOUS, -1, 0);
> > @@ -1363,7 +1363,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	/* Do initial binds */
> >  	bind_size = (page_size * bo_n_pages) / n_binds;
> >  	for (i = 0; i < n_binds; ++i) {
> > -		if (flags & MUNMAP_FLAG_USERPTR)
> > +		if (flags & MAP_FLAG_USERPTR)
> >  			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
> >  						 bind_size, sync, 1);
> >  		else
> > @@ -1378,7 +1378,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	 * cause a fault if a rebind occurs during munmap style VM unbind
> >  	 * (partial VMAs unbound).
> >  	 */
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		t.fd = fd;
> >  		t.vm = vm;
> >  #define PAGE_SIZE	4096
> > @@ -1437,7 +1437,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  		data = map + i * page_size;
> >  		igt_assert_eq(data->data, 0xc0ffee);
> >  	}
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		memset(map, 0, PAGE_SIZE / 2);
> >  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> >  	} else {
> > @@ -1487,7 +1487,7 @@ try_again_after_invalidate:
> >  			igt_assert_eq(data->data, 0xc0ffee);
> >  		}
> >  	}
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		memset(map, 0, PAGE_SIZE / 2);
> >  		memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> >  	} else {
> > @@ -1498,7 +1498,7 @@ try_again_after_invalidate:
> >  	 * The munmap style VM unbind can create new VMAs, make sure those are
> >  	 * in the bookkeeping for another rebind after a userptr invalidate.
> >  	 */
> > -	if (flags & MUNMAP_FLAG_INVALIDATE && !invalidate++) {
> > +	if (flags & MAP_FLAG_INVALIDATE && !invalidate++) {
> >  		map = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> >  			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> >  			    MAP_ANONYMOUS, -1, 0);
> > @@ -1509,7 +1509,7 @@ try_again_after_invalidate:
> >  	/* Confirm unbound region can be rebound */
> >  	syncobj_reset(fd, &sync[0].handle, 1);
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -	if (flags & MUNMAP_FLAG_USERPTR)
> > +	if (flags & MAP_FLAG_USERPTR)
> >  		xe_vm_bind_userptr_async(fd, vm, 0,
> >  					 addr + unbind_n_page_offfset * page_size,
> >  					 addr + unbind_n_page_offfset * page_size,
> > @@ -1557,7 +1557,7 @@ try_again_after_invalidate:
> >  		igt_assert_eq(data->data, 0xc0ffee);
> >  	}
> >  
> > -	if (flags & MUNMAP_FLAG_HAMMER_FIRST_PAGE) {
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> >  		exit = 1;
> >  		pthread_join(t.thread, NULL);
> >  		pthread_barrier_destroy(&barrier);
> > @@ -1572,6 +1572,251 @@ try_again_after_invalidate:
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > +/**
> > + * SUBTEST: mmap-style-bind-%s
> > + * Description: Test mmap style bind with %arg[1]
> > + * Run type: FULL
> > + * TODO: change ``'Run type' == FULL`` to a better category
> > + *
> > + * arg[1]:
> > + *
> > + * @all:				all
> > + * @one-partial:			one partial
> > + * @either-side-partial:		either side partial
> > + * @either-side-full:			either side full
> > + * @either-side-partial-hammer:		either side partial hammer
> > + * @end:				end
> > + * @front:				front
> > + * @many-all:				many all
> > + * @many-either-side-partial:		many either side partial
> > + * @many-either-side-partial-hammer:	many either side partial hammer
> > + * @userptr-all:			userptr all
> > + * @userptr-one-partial:		userptr one partial
> > + * @userptr-either-side-partial:	userptr either side partial
> > + * @userptr-either-side-full:		userptr either side full
> > + */
> > +
> > +static void
> > +test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> > +		     int bo_n_pages, int n_binds, int unbind_n_page_offfset,
> > +		     int unbind_n_pages, unsigned int flags)
> > +{
> > +	struct drm_xe_sync sync[2] = {
> > +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > +		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > +	};
> > +	struct drm_xe_exec exec = {
> > +		.num_batch_buffer = 1,
> > +		.num_syncs = 2,
> > +		.syncs = to_user_pointer(sync),
> > +	};
> > +	uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
> > +	uint32_t vm;
> > +	uint32_t engine;
> > +	size_t bo_size;
> > +	uint32_t bo0 = 0, bo1 = 0;
> > +	uint64_t bind_size;
> > +	uint64_t page_size = xe_get_default_alignment(fd);
> 
> Do we have plans to add support to the other supported cases with different
> offsets, alignment, etc?
> 

The next patch does some of this. The way the test is structured, adding a
new case is just another table entry.
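
e.g. something like this (entry made up, just to show the shape --
{ name, bo_n_pages, n_binds, unbind offset, unbind pages, flags }):

	{ "odd-offset-partial", 8, 2, 3, 2, 0 },

plus the matching line in the subtest documentation comment.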

> anyway this test looks correct to me
> 
> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> 

Thanks.

Matt

> > +	struct {
> > +		uint32_t batch[16];
> > +		uint64_t pad;
> > +		uint32_t data;
> > +	} *data;
> > +	void *map0, *map1;
> > +	int i, b;
> > +	struct thread_data t;
> > +	pthread_barrier_t barrier;
> > +	int exit = 0;
> > +
> > +	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > +	bo_size = page_size * bo_n_pages;
> > +
> > +	if (flags & MAP_FLAG_USERPTR) {
> > +		map0 = mmap(from_user_pointer(addr), bo_size, PROT_READ |
> > +			    PROT_WRITE, MAP_SHARED | MAP_FIXED |
> > +			    MAP_ANONYMOUS, -1, 0);
> > +		map1 = mmap(from_user_pointer(addr + bo_size),
> > +			    bo_size, PROT_READ | PROT_WRITE, MAP_SHARED |
> > +			    MAP_FIXED | MAP_ANONYMOUS, -1, 0);
> > +		igt_assert(map0 != MAP_FAILED);
> > +		igt_assert(map1 != MAP_FAILED);
> > +	} else {
> > +		bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
> > +		map0 = xe_bo_map(fd, bo0, bo_size);
> > +		bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
> > +		map1 = xe_bo_map(fd, bo1, bo_size);
> > +	}
> > +	memset(map0, 0, bo_size);
> > +	memset(map1, 0, bo_size);
> > +
> > +	engine = xe_engine_create(fd, vm, eci, 0);
> > +
> > +	sync[0].handle = syncobj_create(fd, 0);
> > +	sync[1].handle = syncobj_create(fd, 0);
> > +
> > +	/* Do initial binds */
> > +	bind_size = (page_size * bo_n_pages) / n_binds;
> > +	for (i = 0; i < n_binds; ++i) {
> > +		if (flags & MAP_FLAG_USERPTR)
> > +			xe_vm_bind_userptr_async(fd, vm, 0, addr, addr,
> > +						 bind_size, sync, 1);
> > +		else
> > +			xe_vm_bind_async(fd, vm, 0, bo0, i * bind_size,
> > +					 addr, bind_size, sync, 1);
> > +		addr += bind_size;
> > +	}
> > +	addr = base_addr;
> > +
> > +	/*
> > +	 * Kick a thread to write the first page continuously to ensure we can't
> > +	 * cause a fault if a rebind occurs during munmap style VM unbind
> > +	 * (partial VMAs unbound).
> > +	 */
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> > +		t.fd = fd;
> > +		t.vm = vm;
> > +#define PAGE_SIZE	4096
> > +		t.addr = addr + PAGE_SIZE / 2;
> > +		t.eci = eci;
> > +		t.exit = &exit;
> > +		t.map = map0 + PAGE_SIZE / 2;
> > +		t.barrier = &barrier;
> > +		pthread_barrier_init(&barrier, NULL, 2);
> > +		pthread_create(&t.thread, 0, hammer_thread, &t);
> > +		pthread_barrier_wait(&barrier);
> > +	}
> > +
> > +	/* Verify we can use every page */
> > +	for (i = 0; i < n_binds; ++i) {
> > +		uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > +		uint64_t batch_addr = addr + batch_offset;
> > +		uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > +		uint64_t sdi_addr = addr + sdi_offset;
> > +		data = map0 + i * page_size;
> > +
> > +		b = 0;
> > +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > +		data->batch[b++] = sdi_addr;
> > +		data->batch[b++] = sdi_addr >> 32;
> > +		data->batch[b++] = 0xc0ffee;
> > +		data->batch[b++] = MI_BATCH_BUFFER_END;
> > +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > +
> > +		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> > +		if (i)
> > +			syncobj_reset(fd, &sync[1].handle, 1);
> > +		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> > +
> > +		exec.engine_id = engine;
> > +		exec.address = batch_addr;
> > +		xe_exec(fd, &exec);
> > +
> > +		addr += page_size;
> > +	}
> > +	addr = base_addr;
> > +
> > +	/* Bind some of the pages to different BO / userptr */
> > +	syncobj_reset(fd, &sync[0].handle, 1);
> > +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > +	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> > +	if (flags & MAP_FLAG_USERPTR)
> > +		xe_vm_bind_userptr_async(fd, vm, 0, addr + bo_size +
> > +					 unbind_n_page_offfset * page_size,
> > +					 addr + unbind_n_page_offfset * page_size,
> > +					 unbind_n_pages * page_size, sync, 2);
> > +	else
> > +		xe_vm_bind_async(fd, vm, 0, bo1,
> > +				 unbind_n_page_offfset * page_size,
> > +				 addr + unbind_n_page_offfset * page_size,
> > +				 unbind_n_pages * page_size, sync, 2);
> > +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > +	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> > +
> > +	/* Verify all pages written */
> > +	for (i = 0; i < n_binds; ++i) {
> > +		data = map0 + i * page_size;
> > +		igt_assert_eq(data->data, 0xc0ffee);
> > +	}
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> > +		memset(map0, 0, PAGE_SIZE / 2);
> > +		memset(map0 + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
> > +	} else {
> > +		memset(map0, 0, bo_size);
> > +		memset(map1, 0, bo_size);
> > +	}
> > +
> > +	/* Verify we can use every page */
> > +	for (i = 0; i < n_binds; ++i) {
> > +		uint64_t batch_offset = (char *)&data->batch - (char *)data;
> > +		uint64_t batch_addr = addr + batch_offset;
> > +		uint64_t sdi_offset = (char *)&data->data - (char *)data;
> > +		uint64_t sdi_addr = addr + sdi_offset;
> > +
> > +		data = map0 + i * page_size;
> > +		b = 0;
> > +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > +		data->batch[b++] = sdi_addr;
> > +		data->batch[b++] = sdi_addr >> 32;
> > +		data->batch[b++] = 0xc0ffee;
> > +		data->batch[b++] = MI_BATCH_BUFFER_END;
> > +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > +
> > +		data = map1 + i * page_size;
> > +		b = 0;
> > +		data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > +		data->batch[b++] = sdi_addr;
> > +		data->batch[b++] = sdi_addr >> 32;
> > +		data->batch[b++] = 0xc0ffee;
> > +		data->batch[b++] = MI_BATCH_BUFFER_END;
> > +		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> > +
> > +		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> > +		if (i)
> > +			syncobj_reset(fd, &sync[1].handle, 1);
> > +		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> > +
> > +		exec.engine_id = engine;
> > +		exec.address = batch_addr;
> > +		xe_exec(fd, &exec);
> > +
> > +		addr += page_size;
> > +	}
> > +	addr = base_addr;
> > +
> > +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > +	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> > +
> > +	/* Verify all pages written */
> > +	for (i = 0; i < n_binds; ++i) {
> > +		uint32_t result = 0;
> > +
> > +		data = map0 + i * page_size;
> > +		result |= data->data;
> > +
> > +		data = map1 + i * page_size;
> > +		result |= data->data;
> > +
> > +		igt_assert_eq(result, 0xc0ffee);
> > +	}
> > +
> > +	if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
> > +		exit = 1;
> > +		pthread_join(t.thread, NULL);
> > +		pthread_barrier_destroy(&barrier);
> > +	}
> > +
> > +	syncobj_destroy(fd, sync[0].handle);
> > +	syncobj_destroy(fd, sync[1].handle);
> > +	xe_engine_destroy(fd, engine);
> > +	munmap(map0, bo_size);
> > +	munmap(map1, bo_size);
> > +	if (bo0)
> > +		gem_close(fd, bo0);
> > +	if (bo1)
> > +		gem_close(fd, bo1);
> > +	xe_vm_destroy(fd, vm);
> > +}
> > +
> >  igt_main
> >  {
> >  	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
> > @@ -1584,55 +1829,74 @@ igt_main
> >  		int unbind_n_page_offfset;
> >  		int unbind_n_pages;
> >  		unsigned int flags;
> > -	} sections[] = {
> > +	} munmap_sections[] = {
> >  		{ "all", 4, 2, 0, 4, 0 },
> >  		{ "one-partial", 4, 1, 1, 2, 0 },
> >  		{ "either-side-partial", 4, 2, 1, 2, 0 },
> >  		{ "either-side-partial-hammer", 4, 2, 1, 2,
> > -			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
> > +			MAP_FLAG_HAMMER_FIRST_PAGE },
> >  		{ "either-side-full", 4, 4, 1, 2, 0 },
> >  		{ "end", 4, 2, 0, 3, 0 },
> >  		{ "front", 4, 2, 1, 3, 0 },
> >  		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
> >  		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
> >  		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
> > -			MUNMAP_FLAG_HAMMER_FIRST_PAGE },
> > +			MAP_FLAG_HAMMER_FIRST_PAGE },
> >  		{ "many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8, 0 },
> >  		{ "many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2, 0 },
> >  		{ "many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2, 0 },
> > -		{ "userptr-all", 4, 2, 0, 4, MUNMAP_FLAG_USERPTR },
> > -		{ "userptr-one-partial", 4, 1, 1, 2, MUNMAP_FLAG_USERPTR },
> > +		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
> > +		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
> >  		{ "userptr-either-side-partial", 4, 2, 1, 2,
> > -			MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> >  		{ "userptr-either-side-full", 4, 4, 1, 2,
> > -			MUNMAP_FLAG_USERPTR },
> > -		{ "userptr-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR },
> > -		{ "userptr-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> > +		{ "userptr-end", 4, 2, 0, 3, MAP_FLAG_USERPTR },
> > +		{ "userptr-front", 4, 2, 1, 3, MAP_FLAG_USERPTR },
> >  		{ "userptr-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
> > -			MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> >  		{ "userptr-many-either-side-full", 4 * 8, 4 * 8, 1 * 8, 2 * 8,
> > -			MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> >  		{ "userptr-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
> > -			MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> >  		{ "userptr-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
> > -			MUNMAP_FLAG_USERPTR },
> > +			MAP_FLAG_USERPTR },
> >  		{ "userptr-inval-either-side-full", 4, 4, 1, 2,
> > -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> > -		{ "userptr-inval-end", 4, 2, 0, 3, MUNMAP_FLAG_USERPTR |
> > -			MUNMAP_FLAG_INVALIDATE },
> > -		{ "userptr-inval-front", 4, 2, 1, 3, MUNMAP_FLAG_USERPTR |
> > -			MUNMAP_FLAG_INVALIDATE },
> > +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> > +		{ "userptr-inval-end", 4, 2, 0, 3, MAP_FLAG_USERPTR |
> > +			MAP_FLAG_INVALIDATE },
> > +		{ "userptr-inval-front", 4, 2, 1, 3, MAP_FLAG_USERPTR |
> > +			MAP_FLAG_INVALIDATE },
> >  		{ "userptr-inval-many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8,
> > -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> > +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> >  		{ "userptr-inval-many-either-side-partial", 4 * 8, 2 * 8, 1,
> > -			4 * 8 - 2, MUNMAP_FLAG_USERPTR |
> > -				MUNMAP_FLAG_INVALIDATE },
> > +			4 * 8 - 2, MAP_FLAG_USERPTR |
> > +				MAP_FLAG_INVALIDATE },
> >  		{ "userptr-inval-many-either-side-full", 4 * 8, 4 * 8, 1 * 8,
> > -			2 * 8, MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> > +			2 * 8, MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> >  		{ "userptr-inval-many-end", 4 * 8, 4, 0 * 8, 3 * 8 + 2,
> > -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> > +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> >  		{ "userptr-inval-many-front", 4 * 8, 4, 1 * 8 - 2, 3 * 8 + 2,
> > -			MUNMAP_FLAG_USERPTR | MUNMAP_FLAG_INVALIDATE },
> > +			MAP_FLAG_USERPTR | MAP_FLAG_INVALIDATE },
> > +		{ NULL },
> > +	};
> > +	const struct section mmap_sections[] = {
> > +		{ "all", 4, 2, 0, 4, 0 },
> > +		{ "one-partial", 4, 1, 1, 2, 0 },
> > +		{ "either-side-partial", 4, 2, 1, 2, 0 },
> > +		{ "either-side-full", 4, 4, 1, 2, 0 },
> > +		{ "either-side-partial-hammer", 4, 2, 1, 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE },
> > +		{ "end", 4, 2, 0, 3, 0 },
> > +		{ "front", 4, 2, 1, 3, 0 },
> > +		{ "many-all", 4 * 8, 2 * 8, 0 * 8, 4 * 8, 0 },
> > +		{ "many-either-side-partial", 4 * 8, 2 * 8, 1, 4 * 8 - 2, 0 },
> > +		{ "many-either-side-partial-hammer", 4 * 8, 2 * 8, 1, 4 * 8 - 2,
> > +			MAP_FLAG_HAMMER_FIRST_PAGE },
> > +		{ "userptr-all", 4, 2, 0, 4, MAP_FLAG_USERPTR },
> > +		{ "userptr-one-partial", 4, 1, 1, 2, MAP_FLAG_USERPTR },
> > +		{ "userptr-either-side-partial", 4, 2, 1, 2, MAP_FLAG_USERPTR },
> > +		{ "userptr-either-side-full", 4, 4, 1, 2, MAP_FLAG_USERPTR },
> >  		{ NULL },
> >  	};
> >  
> > @@ -1839,7 +2103,7 @@ igt_main
> >  			break;
> >  		}
> >  
> > -	for (const struct section *s = sections; s->name; s++) {
> > +	for (const struct section *s = munmap_sections; s->name; s++) {
> >  		igt_subtest_f("munmap-style-unbind-%s", s->name) {
> >  			igt_require_f(hwe_non_copy,
> >  				      "Requires non-copy engine to run\n");
> > @@ -1853,6 +2117,20 @@ igt_main
> >  		}
> >  	}
> >  
> > +	for (const struct section *s = mmap_sections; s->name; s++) {
> > +		igt_subtest_f("mmap-style-bind-%s", s->name) {
> > +			igt_require_f(hwe_non_copy,
> > +				      "Requires non-copy engine to run\n");
> > +
> > +			test_mmap_style_bind(fd, hwe_non_copy,
> > +					     s->bo_n_pages,
> > +					     s->n_binds,
> > +					     s->unbind_n_page_offfset,
> > +					     s->unbind_n_pages,
> > +					     s->flags);
> > +		}
> > +	}
> > +
> >  	igt_fixture
> >  		drm_close_driver(fd);
> >  }
> > -- 
> > 2.34.1
> > 

* Re: [igt-dev] [PATCH 1/5] xe/xe_exec_basic: A sections to test NULL (sparse) bindings
  2023-07-13 15:19 ` [igt-dev] [PATCH 1/5] " Rodrigo Vivi
@ 2023-07-14 18:00   ` Matthew Brost
  2023-07-14 18:03     ` Rodrigo Vivi
  0 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-14 18:00 UTC (permalink / raw)
  To: Rodrigo Vivi; +Cc: igt-dev

On Thu, Jul 13, 2023 at 11:19:00AM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 10, 2023 at 07:58:52AM -0700, Matthew Brost wrote:
> > Update xe_exec_basic with sections that create a NULL binding for the
> > store data address; the store should just be dropped.
> > 
> > Acked-by: Mauro Carvalho Chehab <mchehab@kernel.org>
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
> >  1 file changed, 28 insertions(+), 5 deletions(-)
> > 
> > diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
> > index af581c327..5624d31aa 100644
> > --- a/tests/xe/xe_exec_basic.c
> > +++ b/tests/xe/xe_exec_basic.c
> > @@ -28,6 +28,7 @@
> >  #define BIND_ENGINE	(0x1 << 4)
> >  #define DEFER_ALLOC	(0x1 << 5)
> >  #define DEFER_BIND	(0x1 << 6)
> > +#define SPARSE		(0x1 << 7)
> >  
> >  /**
> >   * SUBTEST: once-%s
> > @@ -70,6 +71,10 @@
> >   * @bindengine-userptr-rebind:		bind engine userptr rebind
> >   * @bindengine-userptr-invalidate:	bind engine userptr invalidate
> >   * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
> > + * @null:				null
> > + * @null-defer-mmap:			null defer mmap
> > + * @null-defer-bind:			null defer bind
> > + * @null-rebind:			null rebind
> >   */
> >  
> >  static void
> > @@ -86,6 +91,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> >  	uint64_t addr[MAX_N_ENGINES];
> > +	uint64_t sparse_addr[MAX_N_ENGINES];
> >  	uint32_t vm[MAX_N_ENGINES];
> >  	uint32_t engines[MAX_N_ENGINES];
> >  	uint32_t bind_engines[MAX_N_ENGINES];
> > @@ -110,8 +116,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			xe_get_default_alignment(fd));
> >  
> >  	addr[0] = 0x1a0000;
> > -	for (i = 1; i < MAX_N_ENGINES; ++i)
> > +	sparse_addr[0] = 0x301a0000;
> 
> Why 0x301a0000?
> (Although I also never understood where the 0x1a0000 came from to start with...)
> 

Random address, just different.

> > +	for (i = 1; i < MAX_N_ENGINES; ++i) {
> >  		addr[i] = addr[i - 1] + (0x1ull << 32);
> > +		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
> > +	}
> >  
> >  	if (flags & USERPTR) {
> >  #define	MAP_ADDRESS	0x00007fadeadbe000
> > @@ -160,6 +169,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
> >  						 to_user_pointer(data), addr[i],
> >  						 bo_size, sync, 1);
> > +		if (flags & SPARSE)
> > +			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
> > +					    0, 0, sparse_addr[i], bo_size,
> > +					    XE_VM_BIND_OP_MAP |
> > +					    XE_VM_BIND_FLAG_ASYNC |
> > +					    XE_VM_BIND_FLAG_NULL, sync,
> > +					    1, 0, 0);
> >  	}
> >  
> >  	if (flags & DEFER_BIND)
> > @@ -171,7 +187,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> >  		uint64_t batch_addr = __addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > -		uint64_t sdi_addr = __addr + sdi_offset;
> > +		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
> > +				     __addr) + sdi_offset;
> >  		int e = i % n_engines;
> >  
> >  		b = 0;
> > @@ -258,9 +275,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  					INT64_MAX, 0, NULL));
> >  	}
> >  
> > -	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> > -	     i < n_execs; i++)
> > -		igt_assert_eq(data[i].data, 0xc0ffee);
> > +	if (!(flags & SPARSE)) {
> > +		for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> > +		     i < n_execs; i++)
> > +			igt_assert_eq(data[i].data, 0xc0ffee);
> > +	}
> 
> As far as I could see, the basic exec also happens here, and this null bind
> for sparse is an extra one, so why not check the correctness of that basic anyway?
> 
> oh, and if we check the basic we also need to add 'basic-' in the subtest
> names below...
> 

This is checking the DW write in the non-sparse sections; in the sparse
sections the DW write goes to a NULL binding (writes dropped, reads back
zero), so there is nothing to verify there.
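
To make that concrete, a minimal sketch of an explicit sparse-side check
(hypothetical; the patch simply skips verification, and the names below
come from the test above):

	/* Hypothetical extra assert for the SPARSE sections: the store
	 * targets the NULL binding, so the CPU-visible backing data must
	 * stay zero.
	 */
	if (flags & SPARSE) {
		for (i = 0; i < n_execs; i++)
			igt_assert_eq(data[i].data, 0);
	}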

> >  
> >  	for (i = 0; i < n_engines; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > @@ -293,6 +312,10 @@ igt_main
> >  		{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
> >  		{ "userptr", USERPTR },
> >  		{ "rebind", REBIND },
> > +		{ "null", SPARSE },
> 
> and talking about the naming... is the XE_VM_BIND_FLAG_NULL only used for sparse?
> Is any bind for sparse required to use the NULL?

NULL binding == writes dropped, reads back zero.

Sparse is a Vulkan (VK) term, and yes, sparse resources use NULL bindings.
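
For reference, this is the shape of such a bind (the same IGT helper call
the patch adds): no BO and no offset behind the VA range.

	/* A NULL (sparse) bind: no BO, no offset, just a VA range that
	 * drops writes and reads back zero; arguments as in the patch.
	 */
	__xe_vm_bind_assert(fd, vm[i], bind_engines[i], 0 /* bo */,
			    0 /* offset */, sparse_addr[i], bo_size,
			    XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC |
			    XE_VM_BIND_FLAG_NULL, sync, 1, 0, 0);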

> It looks to me that we have a strange mapping name here and we
> should stick to either
> { "sparse", SPARSE },
> { "null", NULL },
> 
> but maybe it is just me missing something here...
>

Yeah, weird naming; I used SPARSE rather than NULL for the define since
NULL is already reserved.
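
(For anyone wondering: NULL is a standard macro from <stddef.h>, so a
bind-flag define cannot reuse the name without clobbering it. A sketch of
the constraint:)

	#include <stddef.h>	/* NULL is already defined here */
	/* #define NULL (0x1 << 7)   would redefine the standard macro */
	#define SPARSE	(0x1 << 7)	/* stand-in name for the NULL flag */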

Matt
 
> > +		{ "null-defer-mmap", SPARSE | DEFER_ALLOC },
> > +		{ "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
> > +		{ "null-rebind", SPARSE | REBIND },
> >  		{ "userptr-rebind", USERPTR | REBIND },
> >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> > -- 
> > 2.34.1
> > 

* Re: [igt-dev] [PATCH 1/5] xe/xe_exec_basic: A sections to test NULL (sparse) bindings
  2023-07-14 18:00   ` Matthew Brost
@ 2023-07-14 18:03     ` Rodrigo Vivi
  0 siblings, 0 replies; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-14 18:03 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Fri, Jul 14, 2023 at 06:00:49PM +0000, Matthew Brost wrote:
> On Thu, Jul 13, 2023 at 11:19:00AM -0400, Rodrigo Vivi wrote:
> > On Mon, Jul 10, 2023 at 07:58:52AM -0700, Matthew Brost wrote:
> > > Update xe_exec_basic with sections that create a NULL binding for the
> > > store data address; the store should just be dropped.
> > > 
> > > Acked-by: Mauro Carvalho Chehab <mchehab@kernel.org>
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > >  tests/xe/xe_exec_basic.c | 33 ++++++++++++++++++++++++++++-----
> > >  1 file changed, 28 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
> > > index af581c327..5624d31aa 100644
> > > --- a/tests/xe/xe_exec_basic.c
> > > +++ b/tests/xe/xe_exec_basic.c
> > > @@ -28,6 +28,7 @@
> > >  #define BIND_ENGINE	(0x1 << 4)
> > >  #define DEFER_ALLOC	(0x1 << 5)
> > >  #define DEFER_BIND	(0x1 << 6)
> > > +#define SPARSE		(0x1 << 7)
> > >  
> > >  /**
> > >   * SUBTEST: once-%s
> > > @@ -70,6 +71,10 @@
> > >   * @bindengine-userptr-rebind:		bind engine userptr rebind
> > >   * @bindengine-userptr-invalidate:	bind engine userptr invalidate
> > >   * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
> > > + * @null:				null
> > > + * @null-defer-mmap:			null defer mmap
> > > + * @null-defer-bind:			null defer bind
> > > + * @null-rebind:			null rebind
> > >   */
> > >  
> > >  static void
> > > @@ -86,6 +91,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > >  		.syncs = to_user_pointer(sync),
> > >  	};
> > >  	uint64_t addr[MAX_N_ENGINES];
> > > +	uint64_t sparse_addr[MAX_N_ENGINES];
> > >  	uint32_t vm[MAX_N_ENGINES];
> > >  	uint32_t engines[MAX_N_ENGINES];
> > >  	uint32_t bind_engines[MAX_N_ENGINES];
> > > @@ -110,8 +116,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > >  			xe_get_default_alignment(fd));
> > >  
> > >  	addr[0] = 0x1a0000;
> > > -	for (i = 1; i < MAX_N_ENGINES; ++i)
> > > +	sparse_addr[0] = 0x301a0000;
> > 
> > Why 0x301a0000?
> > (Although I also never understood where the 0x1a0000 came from to start with...)
> > 
> 
> Random address, just different.
> 
> > > +	for (i = 1; i < MAX_N_ENGINES; ++i) {
> > >  		addr[i] = addr[i - 1] + (0x1ull << 32);
> > > +		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
> > > +	}
> > >  
> > >  	if (flags & USERPTR) {
> > >  #define	MAP_ADDRESS	0x00007fadeadbe000
> > > @@ -160,6 +169,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > >  			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
> > >  						 to_user_pointer(data), addr[i],
> > >  						 bo_size, sync, 1);
> > > +		if (flags & SPARSE)
> > > +			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
> > > +					    0, 0, sparse_addr[i], bo_size,
> > > +					    XE_VM_BIND_OP_MAP |
> > > +					    XE_VM_BIND_FLAG_ASYNC |
> > > +					    XE_VM_BIND_FLAG_NULL, sync,
> > > +					    1, 0, 0);
> > >  	}
> > >  
> > >  	if (flags & DEFER_BIND)
> > > @@ -171,7 +187,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > >  		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > >  		uint64_t batch_addr = __addr + batch_offset;
> > >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > > -		uint64_t sdi_addr = __addr + sdi_offset;
> > > +		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
> > > +				     __addr) + sdi_offset;
> > >  		int e = i % n_engines;
> > >  
> > >  		b = 0;
> > > @@ -258,9 +275,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > >  					INT64_MAX, 0, NULL));
> > >  	}
> > >  
> > > -	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> > > -	     i < n_execs; i++)
> > > -		igt_assert_eq(data[i].data, 0xc0ffee);
> > > +	if (!(flags & SPARSE)) {
> > > +		for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
> > > +		     i < n_execs; i++)
> > > +			igt_assert_eq(data[i].data, 0xc0ffee);
> > > +	}
> > 
> > As far as I could see, the basic exec also happens here, and this null bind
> > for sparse is an extra one, so why not check the correctness of that basic anyway?
> > 
> > oh, and if we check the basic we also need to add 'basic-' in the subtest
> > names below...
> > 
> 
> This is checking the DW write in the non-sparse sections; in the sparse
> sections the DW write goes to a NULL binding (writes dropped, reads back
> zero), so there is nothing to verify there.
> 
> > >  
> > >  	for (i = 0; i < n_engines; i++) {
> > >  		syncobj_destroy(fd, syncobjs[i]);
> > > @@ -293,6 +312,10 @@ igt_main
> > >  		{ "basic-defer-bind", DEFER_ALLOC | DEFER_BIND },
> > >  		{ "userptr", USERPTR },
> > >  		{ "rebind", REBIND },
> > > +		{ "null", SPARSE },
> > 
> > and talking about the naming... is the XE_VM_BIND_FLAG_NULL only used for sparse?
> > Is any bind for sparse required to use the NULL?
> 
> NULL binding == writes dropped, reads back zero.
> 
> Sparse is a Vulkan (VK) term, and yes, sparse resources use NULL bindings.
> 
> > It looks to me that we have a strange mapping name here and we
> > should stick to either
> > { "sparse", SPARSE },
> > { "null", NULL },
> > 
> > but maybe it is just me missing something here...
> >
> 
> Yeah, weird naming; I used SPARSE rather than NULL for the define since
> NULL is already reserved.

it all makes sense now... thanks for the answers

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>


> 
> Matt
>  
> > > +		{ "null-defer-mmap", SPARSE | DEFER_ALLOC },
> > > +		{ "null-defer-bind", SPARSE | DEFER_ALLOC | DEFER_BIND },
> > > +		{ "null-rebind", SPARSE | REBIND },
> > >  		{ "userptr-rebind", USERPTR | REBIND },
> > >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> > >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> > > -- 
> > > 2.34.1
> > > 

* Re: [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-21  4:37 ` [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test Matthew Brost
@ 2023-07-21 20:25   ` Rodrigo Vivi
  0 siblings, 0 replies; 19+ messages in thread
From: Rodrigo Vivi @ 2023-07-21 20:25 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev

On Thu, Jul 20, 2023 at 09:37:34PM -0700, Matthew Brost wrote:
> Unmaps also exercise the bind conflict logic in Xe, so add an unmap to the
> bind queue independent test to further exercise the conflict logic.
> 
> v2: new commit message (Rodrigo)

Thanks for this and for the extra explanation offline.

> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  tests/xe/xe_vm.c | 6 ++++++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> index 5fd511f0b8..36cd803577 100644
> --- a/tests/xe/xe_vm.c
> +++ b/tests/xe/xe_vm.c
> @@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
>  				NULL));
>  	igt_assert_eq(data[0].data, 0xc0ffee);
>  
> +	syncobj_destroy(fd, sync[0].handle);
> +	sync[0].handle = syncobj_create(fd, 0);
> +	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> +	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
> +	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));

it looks like this would work even without the conflict logic, but yeah,
it will exercise that piece of the tree, and it doesn't hurt. So, let's
move forward with it:

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
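
For readers following along, the added sequence boils down to the hunk
quoted above; restated here with comments (helper semantics as described
in the commit message):

	/* Recycle the syncobj, unbind every mapping of bo in the VM with
	 * one async call, then wait on the out-fence; this is the path
	 * that exercises the bind conflict tracking.
	 */
	syncobj_destroy(fd, sync[0].handle);
	sync[0].handle = syncobj_create(fd, 0);
	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));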


> +
>  	syncobj_destroy(fd, sync[0].handle);
>  	for (i = 0; i < N_ENGINES; i++) {
>  		syncobj_destroy(fd, syncobjs[i]);
> -- 
> 2.34.1
> 

* [igt-dev] [PATCH 4/5] xe_vm: Unmap BOs in bind queue independent test
  2023-07-21  4:37 Matthew Brost
@ 2023-07-21  4:37 ` Matthew Brost
  2023-07-21 20:25   ` Rodrigo Vivi
  0 siblings, 1 reply; 19+ messages in thread
From: Matthew Brost @ 2023-07-21  4:37 UTC (permalink / raw)
  To: igt-dev

Unmaps also exercise the bind conflict logic in Xe, so add an unmap to the
bind queue independent test to further exercise the conflict logic.

v2: new commit message (Rodrigo)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/xe/xe_vm.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
index 5fd511f0b8..36cd803577 100644
--- a/tests/xe/xe_vm.c
+++ b/tests/xe/xe_vm.c
@@ -811,6 +811,12 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci)
 				NULL));
 	igt_assert_eq(data[0].data, 0xc0ffee);
 
+	syncobj_destroy(fd, sync[0].handle);
+	sync[0].handle = syncobj_create(fd, 0);
+	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
+	xe_vm_unbind_all_async(fd, vm, 0, bo, sync, 1);
+	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
+
 	syncobj_destroy(fd, sync[0].handle);
 	for (i = 0; i < N_ENGINES; i++) {
 		syncobj_destroy(fd, syncobjs[i]);
-- 
2.34.1
